Mirror of https://github.com/rancher/types.git (synced 2025-06-26 13:41:33 +00:00)

Add vendor for Prometheus operator

parent 3775fd11c9
commit 6adc7c9769
@@ -3,3 +3,4 @@ github.com/rancher/types
github.com/pkg/errors v0.8.0
github.com/rancher/norman 1d24e0fc0b0a92dfc48012e82219e0d584cb8b0b transitive=true
github.com/coreos/prometheus-operator v0.25.0
5
vendor/github.com/PuerkitoBio/purell/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags
7
vendor/github.com/PuerkitoBio/purell/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,7 @@
language: go

go:
- 1.4
- 1.5
- 1.6
- tip
12
vendor/github.com/PuerkitoBio/purell/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
185
vendor/github.com/PuerkitoBio/purell/README.md
generated
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
# Purell
|
||||
|
||||
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
|
||||
|
||||
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
|
||||
|
||||
[](http://travis-ci.org/PuerkitoBio/purell)
|
||||
|
||||
## Install
|
||||
|
||||
`go get github.com/PuerkitoBio/purell`
|
||||
|
||||
## Changelog
|
||||
|
||||
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
|
||||
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
|
||||
* **v0.2.0** : Add benchmarks, Attempt IDN support.
|
||||
* **v0.1.0** : Initial release.
|
||||
|
||||
## Examples
|
||||
|
||||
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
|
||||
|
||||
```go
|
||||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func ExampleNormalizeURLString() {
|
||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
||||
}
|
||||
|
||||
func ExampleMustNormalizeURLString() {
|
||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
||||
FlagsUnsafeGreedy)
|
||||
fmt.Print(normalized)
|
||||
|
||||
// Output: http://somewebsite.com/Amazing%FA/url
|
||||
}
|
||||
|
||||
func ExampleNormalizeURL() {
|
||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
|
||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
||||
}
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
|
||||
|
||||
```go
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
```
|
||||
|
||||
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
|
||||
|
||||
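For instance, a custom set can be built like this (a minimal sketch; the sample URL and flag choices are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the safe set, add dot-segment removal, then drop host lowercasing.
	custom := (purell.FlagsSafe | purell.FlagRemoveDotSegments) &^ purell.FlagLowercaseHost
	normalized, err := purell.NormalizeURLString("HTTP://Example.COM/a/./b/../c", custom)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized)
}
```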
The [full godoc reference is available on gopkgdoc][godoc].
|
||||
|
||||
Some things to note:
|
||||
|
||||
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
|
||||
|
||||
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
|
||||
- %24 -> $
|
||||
- %26 -> &
|
||||
- %2B-%3B -> +,-./0123456789:;
|
||||
- %3D -> =
|
||||
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
- %5F -> _
|
||||
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
|
||||
- %7E -> ~
|
||||
|
||||
|
||||
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
|
||||
|
||||
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
|
||||
|
||||
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
|
||||
|
||||
### Safe vs Usually Safe vs Unsafe
|
||||
|
||||
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
|
||||
|
||||
Consider the following URL:
|
||||
|
||||
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
Normalizing with the `FlagsSafe` gives:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
With the `FlagsUsuallySafeGreedy`:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
And with `FlagsUnsafeGreedy`:
|
||||
|
||||
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
|
||||
|
||||
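The same three levels can be reproduced in code; a minimal sketch using the flag sets shown above (assumes the purell import from the earlier examples):

```go
u := "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid"

// Each call applies one of the convenience sets compared above.
fmt.Println(purell.MustNormalizeURLString(u, purell.FlagsSafe))
fmt.Println(purell.MustNormalizeURLString(u, purell.FlagsUsuallySafeGreedy))
fmt.Println(purell.MustNormalizeURLString(u, purell.FlagsUnsafeGreedy))
```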
## TODOs
|
||||
|
||||
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
|
||||
|
||||
## Thanks / Contributions
|
||||
|
||||
@rogpeppe
|
||||
@jehiah
|
||||
@opennota
|
||||
@pchristopher1275
|
||||
@zenovich
|
||||
|
||||
## License
|
||||
|
||||
The [BSD 3-Clause license][bsd].
|
||||
|
||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
||||
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
|
||||
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
|
||||
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
|
||||
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
|
||||
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
|
375
vendor/github.com/PuerkitoBio/purell/purell.go
generated
vendored
Normal file
@@ -0,0 +1,375 @@
|
||||
/*
|
||||
Package purell offers URL normalization as described on the wikipedia page:
|
||||
http://en.wikipedia.org/wiki/URL_normalization
|
||||
*/
|
||||
package purell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/PuerkitoBio/urlesc"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/text/secure/precis"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// A set of normalization flags determines how a URL will
|
||||
// be normalized.
|
||||
type NormalizationFlags uint
|
||||
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHttpPort = ":80"
|
||||
defaultHttpsPort = ":443"
|
||||
)
|
||||
|
||||
// Regular expressions used by the normalizations
|
||||
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
||||
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
|
||||
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
|
||||
var rxEmptyPort = regexp.MustCompile(`:+$`)
|
||||
|
||||
// Map of flags to implementation function.
|
||||
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
|
||||
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
|
||||
|
||||
// Since maps have undefined traversing order, make a slice of ordered keys
|
||||
var flagsOrder = []NormalizationFlags{
|
||||
FlagLowercaseScheme,
|
||||
FlagLowercaseHost,
|
||||
FlagRemoveDefaultPort,
|
||||
FlagRemoveDirectoryIndex,
|
||||
FlagRemoveDotSegments,
|
||||
FlagRemoveFragment,
|
||||
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
|
||||
FlagRemoveDuplicateSlashes,
|
||||
FlagRemoveWWW,
|
||||
FlagAddWWW,
|
||||
FlagSortQuery,
|
||||
FlagDecodeDWORDHost,
|
||||
FlagDecodeOctalHost,
|
||||
FlagDecodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
|
||||
FlagAddTrailingSlash,
|
||||
}
|
||||
|
||||
// ... and then the map, where order is unimportant
|
||||
var flags = map[NormalizationFlags]func(*url.URL){
|
||||
FlagLowercaseScheme: lowercaseScheme,
|
||||
FlagLowercaseHost: lowercaseHost,
|
||||
FlagRemoveDefaultPort: removeDefaultPort,
|
||||
FlagRemoveDirectoryIndex: removeDirectoryIndex,
|
||||
FlagRemoveDotSegments: removeDotSegments,
|
||||
FlagRemoveFragment: removeFragment,
|
||||
FlagForceHTTP: forceHTTP,
|
||||
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
|
||||
FlagRemoveWWW: removeWWW,
|
||||
FlagAddWWW: addWWW,
|
||||
FlagSortQuery: sortQuery,
|
||||
FlagDecodeDWORDHost: decodeDWORDHost,
|
||||
FlagDecodeOctalHost: decodeOctalHost,
|
||||
FlagDecodeHexHost: decodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash: removeTrailingSlash,
|
||||
FlagAddTrailingSlash: addTrailingSlash,
|
||||
}
|
||||
|
||||
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
||||
result, e := NormalizeURLString(u, f)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
||||
if parsed, e := url.Parse(u); e != nil {
|
||||
return "", e
|
||||
} else {
|
||||
options := make([]precis.Option, 1, 3)
|
||||
options[0] = precis.IgnoreCase
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
options = append(options, precis.FoldCase())
|
||||
}
|
||||
options = append(options, precis.Norm(norm.NFC))
|
||||
profile := precis.NewFreeform(options...)
|
||||
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
|
||||
return "", e
|
||||
}
|
||||
return NormalizeURL(parsed, f), nil
|
||||
}
|
||||
panic("Unreachable code.")
|
||||
}
|
||||
|
||||
// NormalizeURL returns the normalized string.
|
||||
// It takes a parsed URL object as input, as well as the normalization flags.
|
||||
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
|
||||
for _, k := range flagsOrder {
|
||||
if f&k == k {
|
||||
flags[k](u)
|
||||
}
|
||||
}
|
||||
return urlesc.Escape(u)
|
||||
}
|
||||
|
||||
func lowercaseScheme(u *url.URL) {
|
||||
if len(u.Scheme) > 0 {
|
||||
u.Scheme = strings.ToLower(u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
func lowercaseHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = strings.ToLower(u.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func removeDefaultPort(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
scheme := strings.ToLower(u.Scheme)
|
||||
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
||||
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func removeTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if strings.HasSuffix(u.Path, "/") {
|
||||
u.Path = u.Path[:l-1]
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if strings.HasSuffix(u.Host, "/") {
|
||||
u.Host = u.Host[:l-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if !strings.HasSuffix(u.Host, "/") {
|
||||
u.Host += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDotSegments(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
var dotFree []string
|
||||
var lastIsDot bool
|
||||
|
||||
sections := strings.Split(u.Path, "/")
|
||||
for _, s := range sections {
|
||||
if s == ".." {
|
||||
if len(dotFree) > 0 {
|
||||
dotFree = dotFree[:len(dotFree)-1]
|
||||
}
|
||||
} else if s != "." {
|
||||
dotFree = append(dotFree, s)
|
||||
}
|
||||
lastIsDot = (s == "." || s == "..")
|
||||
}
|
||||
// Special case if host does not end with / and new path does not begin with /
|
||||
u.Path = strings.Join(dotFree, "/")
|
||||
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
|
||||
u.Path = "/" + u.Path
|
||||
}
|
||||
// Special case if the last segment was a dot, make sure the path ends with a slash
|
||||
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDirectoryIndex(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
|
||||
}
|
||||
}
|
||||
|
||||
func removeFragment(u *url.URL) {
|
||||
u.Fragment = ""
|
||||
}
|
||||
|
||||
func forceHTTP(u *url.URL) {
|
||||
if strings.ToLower(u.Scheme) == "https" {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
func removeDuplicateSlashes(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
|
||||
}
|
||||
}
|
||||
|
||||
func removeWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = u.Host[4:]
|
||||
}
|
||||
}
|
||||
|
||||
func addWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = "www." + u.Host
|
||||
}
|
||||
}
|
||||
|
||||
func sortQuery(u *url.URL) {
|
||||
q := u.Query()
|
||||
|
||||
if len(q) > 0 {
|
||||
arKeys := make([]string, len(q))
|
||||
i := 0
|
||||
for k, _ := range q {
|
||||
arKeys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(arKeys)
|
||||
buf := new(bytes.Buffer)
|
||||
for _, k := range arKeys {
|
||||
sort.Strings(q[k])
|
||||
for _, v := range q[k] {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteRune('&')
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
|
||||
}
|
||||
}
|
||||
|
||||
// Rebuild the raw query string
|
||||
u.RawQuery = buf.String()
|
||||
}
|
||||
}
|
||||
|
||||
func decodeDWORDHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
var parts [4]int64
|
||||
|
||||
dword, _ := strconv.ParseInt(matches[1], 10, 0)
|
||||
for i, shift := range []uint{24, 16, 8, 0} {
|
||||
parts[i] = dword >> shift & 0xFF
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeOctalHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
|
||||
var parts [4]int64
|
||||
|
||||
for i := 1; i <= 4; i++ {
|
||||
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeHexHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
// Conversion is safe because of regex validation
|
||||
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
|
||||
// Set host as DWORD (base 10) encoded host
|
||||
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
|
||||
// The rest is the same as decoding a DWORD host
|
||||
decodeDWORDHost(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeUnncessaryHostDots(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
|
||||
// Trim the leading and trailing dots
|
||||
u.Host = strings.Trim(matches[1], ".")
|
||||
if len(matches) > 2 {
|
||||
u.Host += matches[2]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeEmptyPortSeparator(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
|
||||
}
|
||||
}
|
11
vendor/github.com/PuerkitoBio/urlesc/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
language: go

go:
- 1.4
- tip

install:
- go build .

script:
- go test -v
27
vendor/github.com/PuerkitoBio/urlesc/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
16
vendor/github.com/PuerkitoBio/urlesc/README.md
generated
vendored
Normal file
@@ -0,0 +1,16 @@
urlesc [Build Status](https://travis-ci.org/PuerkitoBio/urlesc) [GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc)
======

Package urlesc implements query escaping as per RFC 3986.

It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).

## Install

    go get github.com/PuerkitoBio/urlesc

## License

Go license (BSD-3-Clause)
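As the README notes, the package tracks net/url's escaping behavior except for the reserved characters; a minimal usage sketch (the sample strings are arbitrary):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// Escape a value for a query component; '/' and '?' stay unescaped,
	// and spaces become '+', per the package's RFC 3986 rules.
	fmt.Println(urlesc.QueryEscape("a b/c?d"))

	// Re-serialize a parsed URL; RFC 3986 sub-delims such as '!' in the path
	// are left as-is instead of being percent-encoded.
	u, err := url.Parse("http://example.com/hello!/world")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u))
}
```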
180
vendor/github.com/PuerkitoBio/urlesc/urlesc.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package urlesc implements query escaping as per RFC 3986.
|
||||
// It contains some parts of the net/url package, modified so as to allow
|
||||
// some reserved characters incorrectly escaped by net/url.
|
||||
// See https://github.com/golang/go/issues/5684
|
||||
package urlesc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type encoding int
|
||||
|
||||
const (
|
||||
encodePath encoding = 1 + iota
|
||||
encodeUserPassword
|
||||
encodeQueryComponent
|
||||
encodeFragment
|
||||
)
|
||||
|
||||
// Return true if the specified character should be escaped when
|
||||
// appearing in a URL string, according to RFC 3986.
|
||||
func shouldEscape(c byte, mode encoding) bool {
|
||||
// §2.3 Unreserved characters (alphanum)
|
||||
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
|
||||
return false
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
|
||||
return false
|
||||
|
||||
// §2.2 Reserved characters (reserved)
|
||||
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
|
||||
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
|
||||
// Different sections of the URL allow a few of
|
||||
// the reserved characters to appear unescaped.
|
||||
switch mode {
|
||||
case encodePath: // §3.3
|
||||
// The RFC allows sub-delims and : @.
|
||||
// '/', '[' and ']' can be used to assign meaning to individual path
|
||||
// segments. This package only manipulates the path as a whole,
|
||||
// so we allow those as well. That leaves only ? and # to escape.
|
||||
return c == '?' || c == '#'
|
||||
|
||||
case encodeUserPassword: // §3.2.1
|
||||
// The RFC allows : and sub-delims in
|
||||
// userinfo. The parsing of userinfo treats ':' as special so we must escape
|
||||
// all the gen-delims.
|
||||
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
|
||||
|
||||
case encodeQueryComponent: // §3.4
|
||||
// The RFC allows / and ?.
|
||||
return c != '/' && c != '?'
|
||||
|
||||
case encodeFragment: // §4.1
|
||||
// The RFC text is silent but the grammar allows
|
||||
// everything, so escape nothing but #
|
||||
return c == '#'
|
||||
}
|
||||
}
|
||||
|
||||
// Everything else must be escaped.
|
||||
return true
|
||||
}
|
||||
|
||||
// QueryEscape escapes the string so it can be safely placed
|
||||
// inside a URL query.
|
||||
func QueryEscape(s string) string {
|
||||
return escape(s, encodeQueryComponent)
|
||||
}
|
||||
|
||||
func escape(s string, mode encoding) string {
|
||||
spaceCount, hexCount := 0, 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c, mode) {
|
||||
if c == ' ' && mode == encodeQueryComponent {
|
||||
spaceCount++
|
||||
} else {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if spaceCount == 0 && hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
t := make([]byte, len(s)+2*hexCount)
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; {
|
||||
case c == ' ' && mode == encodeQueryComponent:
|
||||
t[j] = '+'
|
||||
j++
|
||||
case shouldEscape(c, mode):
|
||||
t[j] = '%'
|
||||
t[j+1] = "0123456789ABCDEF"[c>>4]
|
||||
t[j+2] = "0123456789ABCDEF"[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
var uiReplacer = strings.NewReplacer(
|
||||
"%21", "!",
|
||||
"%27", "'",
|
||||
"%28", "(",
|
||||
"%29", ")",
|
||||
"%2A", "*",
|
||||
)
|
||||
|
||||
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
|
||||
func unescapeUserinfo(s string) string {
|
||||
return uiReplacer.Replace(s)
|
||||
}
|
||||
|
||||
// Escape reassembles the URL into a valid URL string.
|
||||
// The general form of the result is one of:
|
||||
//
|
||||
// scheme:opaque
|
||||
// scheme://userinfo@host/path?query#fragment
|
||||
//
|
||||
// If u.Opaque is non-empty, String uses the first form;
|
||||
// otherwise it uses the second form.
|
||||
//
|
||||
// In the second form, the following rules apply:
|
||||
// - if u.Scheme is empty, scheme: is omitted.
|
||||
// - if u.User is nil, userinfo@ is omitted.
|
||||
// - if u.Host is empty, host/ is omitted.
|
||||
// - if u.Scheme and u.Host are empty and u.User is nil,
|
||||
// the entire scheme://userinfo@host/ is omitted.
|
||||
// - if u.Host is non-empty and u.Path begins with a /,
|
||||
// the form host/path does not add its own /.
|
||||
// - if u.RawQuery is empty, ?query is omitted.
|
||||
// - if u.Fragment is empty, #fragment is omitted.
|
||||
func Escape(u *url.URL) string {
|
||||
var buf bytes.Buffer
|
||||
if u.Scheme != "" {
|
||||
buf.WriteString(u.Scheme)
|
||||
buf.WriteByte(':')
|
||||
}
|
||||
if u.Opaque != "" {
|
||||
buf.WriteString(u.Opaque)
|
||||
} else {
|
||||
if u.Scheme != "" || u.Host != "" || u.User != nil {
|
||||
buf.WriteString("//")
|
||||
if ui := u.User; ui != nil {
|
||||
buf.WriteString(unescapeUserinfo(ui.String()))
|
||||
buf.WriteByte('@')
|
||||
}
|
||||
if h := u.Host; h != "" {
|
||||
buf.WriteString(h)
|
||||
}
|
||||
}
|
||||
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
|
||||
buf.WriteByte('/')
|
||||
}
|
||||
buf.WriteString(escape(u.Path, encodePath))
|
||||
}
|
||||
if u.RawQuery != "" {
|
||||
buf.WriteByte('?')
|
||||
buf.WriteString(u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
buf.WriteByte('#')
|
||||
buf.WriteString(escape(u.Fragment, encodeFragment))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
14
vendor/github.com/coreos/prometheus-operator/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,14 @@
root = true

[*.py]
indent_style = space
end_of_line = lf
insert_final_newline = true
max_line_length = 100
trim_trailing_whitespace = true
indent_size = 4

[*.yaml]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
19
vendor/github.com/coreos/prometheus-operator/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/operator
prometheus-config-reloader
.build/
*~
*.tgz
requirements.lock
.idea
*.iml
.DS_Store
__pycache__
.env/
.history/
.vscode/
tmp

# These are empty target files, created on every docker build. Their sole
# purpose is to track the last target execution time to evaluate whether the
# container needs to be rebuilt
hack/*-image
13
vendor/github.com/coreos/prometheus-operator/.header
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// Copyright 2018 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
36
vendor/github.com/coreos/prometheus-operator/.promu.yml
generated
vendored
Normal file
@@ -0,0 +1,36 @@
repository:
  path: github.com/coreos/prometheus-operator
build:
  flags: -a -tags netgo
  binaries:
  - name: operator
    path: ./cmd/operator
tarball:
  files:
  - LICENSE
  - NOTICE
crossbuild:
  platforms:
  - linux/amd64
  #- linux/386
  #- darwin/amd64
  #- darwin/386
  #- windows/amd64
  #- windows/386
  #- freebsd/amd64
  #- freebsd/386
  #- openbsd/amd64
  #- openbsd/386
  #- netbsd/amd64
  #- netbsd/386
  #- dragonfly/amd64
  #- linux/arm
  #- linux/arm64
  #- freebsd/arm
  #- openbsd/arm
  #- linux/mips64
  #- linux/mips64le
  #- netbsd/arm
  #- linux/ppc64
  #- linux/ppc64le
35
vendor/github.com/coreos/prometheus-operator/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,35 @@
sudo: required
language: go
go:
- "1.11"
services:
- docker
before_install:
- pip install --user awscli
- export PATH=$PATH:$HOME/.local/bin
jobs:
  include:
  - stage: Sanity check and tests
    # Check generated contents are up to date and code is formatted.
    script: make --always-make format generate-in-docker && git diff --exit-code
  - script: cd contrib/kube-prometheus && make test-in-docker
  # Build Prometheus Operator rule config map to rule file crds cli tool
  - script: cd cmd/po-rule-migration && go install
  # Ensure vendor folder matches vendor.json
  - script: ./scripts/golang-dep-ensure.sh
  # Unit tests
  - script: make test-unit
  # E2e tests
  - script: ./scripts/travis-e2e.sh
  - script: ./scripts/travis-e2e-helm.sh

  - stage: deploy
    script: skip
    deploy:
      provider: script
      script: make helm-sync-s3
      on:
        branch: master

  - stage: push-docker-image
    script: ./scripts/travis-push-docker-image.sh
390
vendor/github.com/coreos/prometheus-operator/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
## 0.25.0 / 2018-10-24
|
||||
|
||||
* [FEATURE] Allow passing additional alert relabel configs in Prometheus custom resource (#2022)
|
||||
* [FEATURE] Add ability to mount custom ConfigMaps into Alertmanager and Prometheus (#2028)
|
||||
|
||||
## 0.24.0 / 2018-10-11
|
||||
|
||||
This release has breaking changes for `prometheus_operator_.*` metrics.
|
||||
|
||||
`prometheus_operator_alertmanager_reconcile_errors_total` and `prometheus_operator_prometheus_reconcile_errors_total`
|
||||
are now combined and called `prometheus_operator_reconcile_errors_total`.
|
||||
Instead the metric has a "controller" label which indicates the errors from the Prometheus or Alertmanager controller.
|
||||
|
||||
The same happened with `prometheus_operator_alertmanager_spec_replicas` and `prometheus_operator_prometheus_spec_replicas`
|
||||
which is now called `prometheus_operator_spec_replicas` and also has the "controller" label.
|
||||
|
||||
The `prometheus_operator_triggered_total` metric now has a "controller" label as well and finally instruments the
|
||||
Alertmanager controller.
|
||||
|
||||
For a full description see: https://github.com/coreos/prometheus-operator/pull/1984#issue-221139702
|
||||
|
||||
In order to support multiple namespaces, the `--namespace` flag changed to `--namespaces`
|
||||
and accepts a comma-separated list of namespaces as a string.
|
||||
|
||||
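For example, assuming the `operator` binary built by this repository, the flag would now be passed roughly as follows (exact deployment wiring varies):

```bash
# Watch several namespaces: one flag, one comma-separated string.
operator --namespaces=monitoring,default
```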
* [CHANGE] Default to Node Exporter v0.16.0 (#1812)
|
||||
* [CHANGE] Update to Go 1.11 (#1855)
|
||||
* [CHANGE] Default to Prometheus v2.4.3 (#1929) (#1983)
|
||||
* [CHANGE] Default to Thanos v0.1.0 (#1954)
|
||||
* [CHANGE] Overhaul metrics while adding triggerBy metric for Alertmanager (#1984)
|
||||
* [CHANGE] Add multi namespace support (#1813)
|
||||
* [FEATURE] Add SHA field to Prometheus, Alertmanager and Thanos for images (#1847) (#1854)
|
||||
* [FEATURE] Add configuration for priority class to be assigned to Pods (#1875)
|
||||
* [FEATURE] Configure sampleLimit per ServiceMonitor (#1895)
|
||||
* [FEATURE] Add additionalPeers to Alertmanager (#1878)
|
||||
* [FEATURE] Add podTargetLabels to ServiceMonitors (#1880)
|
||||
* [FEATURE] Relabel target name for Pods (#1896)
|
||||
* [FEATURE] Allow configuration of relabel_configs per ServiceMonitor (#1879)
|
||||
* [FEATURE] Add illegal update reconciliation by deleting StatefulSet (#1931)
|
||||
* [ENHANCEMENT] Set Thanos cluster and grpc ip from pod.ip (#1836)
|
||||
* [BUGFIX] Add square brackets around pod IPs for IPv6 support (#1881)
|
||||
* [BUGFIX] Allow periods in secret name (#1907)
|
||||
* [BUGFIX] Add BearerToken in generateRemoteReadConfig (#1956)
|
||||
|
||||
## 0.23.2 / 2018-08-23
|
||||
|
||||
* [BUGFIX] Do not abort kubelet endpoints update due to nodes without IP addresses defined (#1816)
|
||||
|
||||
## 0.23.1 / 2018-08-13
|
||||
|
||||
* [BUGFIX] Fix high CPU usage of Prometheus Operator when annotating Prometheus resource (#1785)
|
||||
|
||||
## 0.23.0 / 2018-08-06
|
||||
|
||||
* [CHANGE] Deprecate specification of Prometheus rules via ConfigMaps in favor of `PrometheusRule` CRDs
|
||||
* [FEATURE] Introduce new flag to control logging format (#1475)
|
||||
* [FEATURE] Ensure Prometheus Operator container runs as `nobody` user by default (#1393)
|
||||
* [BUGFIX] Fix reconciliation of Prometheus StatefulSets due to ServiceMonitors and PrometheusRules changes when a single namespace is being watched (#1749)
|
||||
|
||||
## 0.22.2 / 2018-07-24
|
||||
|
||||
* [BUGFIX] Do not migrate rule config map for Prometheus statefulset on rule config map to PrometheusRule migration (#1679)
|
||||
|
||||
## 0.22.1 / 2018-07-19
|
||||
|
||||
* [ENHANCEMENT] Enable operation when CRDs are created externally (#1640)
|
||||
* [BUGFIX] Do not watch for new namespaces if a specific namespace has been selected (#1640)
|
||||
|
||||
## 0.22.0 / 2018-07-09
|
||||
|
||||
* [FEATURE] Allow setting volume name via volumetemplateclaimtemplate in prom and alertmanager (#1538)
|
||||
* [FEATURE] Allow setting custom tags of container images (#1584)
|
||||
* [ENHANCEMENT] Update default Thanos to v0.1.0-rc.2 (#1585)
|
||||
* [ENHANCEMENT] Split rule config map mounted into Prometheus if it exceeds Kubernetes config map limit (#1562)
|
||||
* [BUGFIX] Mount Prometheus data volume into Thanos sidecar & pass correct path to Thanos sidecar (#1583)
|
||||
|
||||
## 0.21.0 / 2018-06-28
|
||||
|
||||
* [CHANGE] Default to Prometheus v2.3.1.
|
||||
* [CHANGE] Default to Alertmanager v0.15.0.
|
||||
* [FEATURE] Make remote write queue configurations configurable.
|
||||
* [FEATURE] Add Thanos integration (experimental).
|
||||
* [BUGFIX] Fix usage of console templates and libraries.
|
||||
|
||||
## 0.20.0 / 2018-06-05
|
||||
|
||||
With this release we introduce a new Custom Resource Definition - the
|
||||
`PrometheusRule` CRD. It addresses the need for rule syntax validation and rule
|
||||
selection across namespaces. `PrometheusRule` replaces the configuration of
|
||||
Prometheus rules via K8s ConfigMaps. There are two migration paths:
|
||||
|
||||
1. Automated live migration: If the Prometheus Operator finds Kubernetes
|
||||
ConfigMaps that match the `RuleSelector` in a `Prometheus` specification, it
|
||||
will convert them to matching `PrometheusRule` resources.
|
||||
|
||||
2. Manual migration: We provide a basic CLI tool to convert Kubernetes
|
||||
ConfigMaps to `PrometheusRule` resources.
|
||||
|
||||
```bash
|
||||
go get -u github.com/coreos/prometheus-operator/cmd/po-rule-migration
|
||||
po-rule-migration \
|
||||
--rule-config-map=<path-to-config-map> \
|
||||
--rule-crds-destination=<path-to-rule-crd-destination>
|
||||
```
|
||||
|
||||
* [FEATURE] Add leveled logging to Prometheus Operator (#1277)
|
||||
* [FEATURE] Allow additional Alertmanager configuration in Prometheus CRD (#1338)
|
||||
* [FEATURE] Introduce `PrometheusRule` Custom Resource Definition (#1333)
|
||||
* [ENHANCEMENT] Allow Prometheus to consider all namespaces to find ServiceMonitors (#1278)
|
||||
* [BUGFIX] Do not attempt to set default memory request for Prometheus 2.0 (#1275)
|
||||
|
||||
## 0.19.0 / 2018-04-25
|
||||
|
||||
* [FEATURE] Allow specifying additional Prometheus scrape configs via secret (#1246)
|
||||
* [FEATURE] Enable Thanos sidecar (#1219)
|
||||
* [FEATURE] Make AM log level configurable (#1192)
|
||||
* [ENHANCEMENT] Enable Prometheus to select Service Monitors outside own namespace (#1227)
|
||||
* [ENHANCEMENT] Enrich Prometheus operator CRD registration error handling (#1208)
|
||||
* [BUGFIX] Allow up to 10m for Prometheus pod on startup for data recovery (#1232)
|
||||
|
||||
## 0.18.1 / 2018-04-09
|
||||
|
||||
* [BUGFIX] Fix alertmanager >=0.15.0 cluster gossip communication (#1193)
|
||||
|
||||
## 0.18.0 / 2018-03-04
|
||||
|
||||
From this release onwards only Kubernetes versions v1.8 and higher are supported. If you have an older version of Kubernetes and the Prometheus Operator running, we recommend upgrading Kubernetes first and then the Prometheus Operator.
|
||||
|
||||
While multiple validation issues have been fixed, it will remain a beta feature in this release. If you want to update validations, you need to either apply the CustomResourceDefinitions located in `example/prometheus-operator-crd` or delete all CRDs and restart the Prometheus Operator.
|
||||
|
||||
Some changes cause Prometheus and Alertmanager clusters to be redeployed. If you do not have persistent storage backing your data, this means you will lose the amount of data equal to your retention time.
|
||||
|
||||
* [CHANGE] Use canonical `/prometheus` and `/alertmanager` as data dirs in containers.
|
||||
* [FEATURE] Allow configuring Prometheus and Alertmanager servers to listen on loopback interface, allowing proxies to be the ingress point of those Pods.
|
||||
* [FEATURE] Allow configuring additional containers in Prometheus and Alertmanager Pods.
|
||||
* [FEATURE] Add ability to whitelist Kubernetes labels to become Prometheus labels.
|
||||
* [FEATURE] Allow specifying additional secrets for Alertmanager Pods to mount.
|
||||
* [FEATURE] Allow specifying `bearer_token_file` for Alertmanager configurations of Prometheus objects in order to authenticate with Alertmanager.
|
||||
* [FEATURE] Allow specifying TLS configuration for Alertmanager configurations of Prometheus objects.
|
||||
* [FEATURE] Add metrics for reconciliation errors: `prometheus_operator_alertmanager_reconcile_errors_total` and `prometheus_operator_prometheus_reconcile_errors_total`.
|
||||
* [FEATURE] Support `read_recent` and `required_matchers` fields for remote read configurations.
|
||||
* [FEATURE] Allow disabling any defaults of `SecurityContext` fields of Pods.
|
||||
* [BUGFIX] Handle Alertmanager >=v0.15.0 breaking changes correctly.
|
||||
* [BUGFIX] Fix invalid validations for metric relabeling fields.
|
||||
* [BUGFIX] Fix validations for `AlertingSpec`.
|
||||
* [BUGFIX] Fix validations for deprecated storage fields.
|
||||
* [BUGFIX] Fix remote read and write basic auth support.
|
||||
* [BUGFIX] Fix properly propagate errors of Prometheus config reloader.
|
||||
|
||||
## 0.17.0 / 2018-02-15
|
||||
|
||||
This release adds validations as a beta feature. It will only be installed on new clusters, existing CRD definitions will not be updated, this will be done in a future release. Please try out this feature and give us feedback!
|
||||
|
||||
* [CHANGE] Default Prometheus version v2.2.0-rc.0.
|
||||
* [CHANGE] Default Alertmanager version v0.14.0.
|
||||
* [FEATURE] Generate and add CRD validations.
|
||||
* [FEATURE] Add ability to set `serviceAccountName` for Alertmanager Pods.
|
||||
* [FEATURE] Add ability to specify custom `securityContext` for Alertmanager Pods.
|
||||
* [ENHANCEMENT] Default to non-root security context for Alertmanager Pods.
|
||||
|
||||
## 0.16.1 / 2018-01-16
|
||||
|
||||
* [CHANGE] Default to Alertmanager v0.13.0.
|
||||
* [BUGFIX] Alertmanager flags must be double dashed starting v0.13.0.
|
||||
|
||||
## 0.16.0 / 2018-01-11
|
||||
|
||||
* [FEATURE] Add support for specifying remote storage configurations.
|
||||
* [FEATURE] Add ability to specify log level.
|
||||
* [FEATURE] Add support for dropping metrics at scrape time.
|
||||
* [ENHANCEMENT] Ensure that resource limit can't make Pods unschedulable.
|
||||
* [ENHANCEMENT] Allow configuring emptyDir volumes
|
||||
* [BUGFIX] Use `--storage.tsdb.no-lockfile` for Prometheus 2.0.
|
||||
* [BUGFIX] Fix Alertmanager default storage.path.
|
||||
|
||||
## 0.15.0 / 2017-11-22
|
||||
|
||||
* [CHANGE] Default Prometheus version v2.0.0
|
||||
* [BUGFIX] Generate ExternalLabels deterministically
|
||||
* [BUGFIX] Fix incorrect mount path of Alertmanager data volume
|
||||
* [EXPERIMENTAL] Add ability to specify CRD Kind name
|
||||
|
||||
## 0.14.1 / 2017-11-01
|
||||
|
||||
* [BUGFIX] Ignore illegal change of PodManagementPolicy to StatefulSet.
|
||||
|
||||
## 0.14.0 / 2017-10-19
|
||||
|
||||
* [CHANGE] Default Prometheus version v2.0.0-rc.1.
|
||||
* [CHANGE] Default Alertmanager version v0.9.1.
|
||||
* [BUGFIX] Set StatefulSet replicas to 0 if 0 is specified in Alertmanager/Prometheus object.
|
||||
* [BUGFIX] Glob for all files in a ConfigMap as rule files.
|
||||
* [FEATURE] Add ability to run Prometheus Operator for a single namespace.
|
||||
* [FEATURE] Add ability to specify CRD api group.
|
||||
* [FEATURE] Use readiness and health endpoints of Prometheus 1.8+.
|
||||
* [ENHANCEMENT] Add OwnerReferences to managed objects.
|
||||
* [ENHANCEMENT] Use parallel pod creation strategy for Prometheus StatefulSets.
|
||||
|
||||
## 0.13.0 / 2017-09-21
|
||||
|
||||
After a long period of not having broken any functionality in the Prometheus Operator, we have decided to promote the status of this project to beta.
|
||||
|
||||
Compatibility guarantees and migration strategies continue to be the same as for the `v0.12.0` release.
|
||||
|
||||
* [CHANGE] Remove analytics collection.
|
||||
* [BUGFIX] Fix memory leak in kubelet endpoints sync.
|
||||
* [FEATURE] Allow setting global default `scrape_interval`.
|
||||
* [FEATURE] Allow setting Pod objectmeta to Prometheus and Alertmanager objects.
|
||||
* [FEATURE] Allow setting tolerations and affinity for Prometheus and Alertmanager objects.
|
||||
|
||||
## 0.12.0 / 2017-08-24
|
||||
|
||||
Starting with this release only Kubernetes `v1.7.x` and up is supported as CustomResourceDefinitions are a requirement for the Prometheus Operator and are only available from those versions and up.
|
||||
|
||||
Additionally all objects have been promoted from `v1alpha1` to `v1`. On start up of this version of the Prometheus Operator the previously used `ThirdPartyResource`s and the associated `v1alpha1` objects will be automatically migrated to their `v1` equivalent `CustomResourceDefinition`.
|
||||
|
||||
* [CHANGE] All manifests created and used by the Prometheus Operator have been promoted from `v1alpha1` to `v1`.
|
||||
* [CHANGE] Use Kubernetes `CustomResourceDefinition`s instead of `ThirdPartyResource`s.
|
||||
* [FEATURE] Add ability to set scrape timeout to `ServiceMonitor`.
|
||||
* [ENHANCEMENT] Use `StatefulSet` rolling deployments.
|
||||
* [ENHANCEMENT] Properly set `SecurityContext` for Prometheus 2.0 deployments.
|
||||
* [ENHANCEMENT] Enable web lifecycle APIs for Prometheus 2.0 deployments.
|
||||
|
||||
## 0.11.2 / 2017-09-21
|
||||
|
||||
* [BUGFIX] Fix memory leak in kubelet endpoints sync.
|
||||
|
||||
## 0.11.1 / 2017-07-28
|
||||
|
||||
* [ENHANCEMENT] Add profiling endpoints.
|
||||
* [BUGFIX] Adapt Alertmanager storage usage to not use deprecated storage definition.
|
||||
|
||||
## 0.11.0 / 2017-07-20
|
||||
|
||||
Warning: This release deprecates the previously used storage definition in favor of upstream PersistentVolumeClaim templates. While this should not have an immediate effect on a running cluster, Prometheus object definitions that have storage configured need to be adapted. The previously existing fields are still there, but have no effect anymore.
|
||||
|
||||
* [FEATURE] Add Prometheus 2.0 alpha3 support.
|
||||
* [FEATURE] Use PVC templates instead of custom storage definition.
|
||||
* [FEATURE] Add cAdvisor port to kubelet sync.
|
||||
* [FEATURE] Allow default base images to be configurable.
|
||||
* [FEATURE] Configure Prometheus to only use necessary namespaces.
|
||||
* [ENHANCEMENT] Improve rollout detection for Alertmanager clusters.
|
||||
* [BUGFIX] Fix targetPort relabeling.
|
||||
|
||||
## 0.10.2 / 2017-06-21
|
||||
|
||||
* [BUGFIX] Use computed route prefix instead of directly from manifest.
|
||||
|
||||
## 0.10.1 / 2017-06-13
|
||||
|
||||
Attention: if the basic auth feature was previously used, the `key` and `name`
|
||||
fields need to be switched. This was not intentional, and the bug is now fixed,
|
||||
but causes this change.
|
||||
|
||||
* [CHANGE] Prometheus default version v1.7.1.
|
||||
* [CHANGE] Alertmanager default version v0.7.1.
|
||||
* [BUGFIX] Fix basic auth secret key selector `key` and `name` switched.
|
||||
* [BUGFIX] Fix route prefix flag not always being set for Prometheus.
|
||||
* [BUGFIX] Fix nil panic on replica metrics.
|
||||
* [FEATURE] Add ability to specify Alertmanager path prefix for Prometheus.
|
||||
|
||||
## 0.10.0 / 2017-06-09
|
||||
|
||||
* [CHANGE] Prometheus route prefix defaults to root.
|
||||
* [CHANGE] Default to Prometheus v1.7.0.
|
||||
* [CHANGE] Default to Alertmanager v0.7.0.
|
||||
* [FEATURE] Add route prefix support to Alertmanager resource.
|
||||
* [FEATURE] Add metrics on expected replicas.
|
||||
* [FEATURE] Support for running Alertmanager v0.7.0.
|
||||
* [BUGFIX] Fix sensitive rollout triggering.
|
||||
|
||||
## 0.9.1 / 2017-05-18
|
||||
|
||||
* [FEATURE] Add experimental Prometheus 2.0 support.
|
||||
* [FEATURE] Add support for setting Prometheus external labels.
|
||||
* [BUGFIX] Fix non-deterministic config generation.
|
||||
|
||||
## 0.9.0 / 2017-05-09

* [CHANGE] The `kubelet-object` flag has been renamed to `kubelet-service`.
* [CHANGE] Remove automatic relabelling of Pod and Service labels onto targets.
* [CHANGE] Remove "non-namespaced" alpha annotation in favor of `honor_labels`.
* [FEATURE] Add ability to make use of the Prometheus `honor_labels` configuration option.
* [FEATURE] Add ability to specify image pull secrets for Prometheus and Alertmanager pods.
* [FEATURE] Add basic auth configuration option through ServiceMonitor.
* [ENHANCEMENT] Add liveness and readiness probes to Prometheus and Alertmanager pods.
* [ENHANCEMENT] Add default resource requests for Alertmanager pods.
* [ENHANCEMENT] Fall back to ExternalIPs when InternalIPs are not available in kubelet sync.
* [ENHANCEMENT] Improved change detection to trigger Prometheus rollout.
* [ENHANCEMENT] Do not delete unmanaged Prometheus configuration Secret.

## 0.8.2 / 2017-04-20

* [ENHANCEMENT] Use new Prometheus 1.6 storage flags and make them the default.

## 0.8.1 / 2017-04-13

* [ENHANCEMENT] Include kubelet insecure port in kubelet Endpoints object.

## 0.8.0 / 2017-04-07

* [FEATURE] Add ability to mount custom secrets into Prometheus Pods. Note that the secrets cannot be modified after creation; if the list is modified after creation, it will not affect the Prometheus Pods (see the sketch after this list).
* [FEATURE] Attach pod and service name as labels to Pod targets.

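The sketch below illustrates the secret-mounting feature from the first item. It is an illustration, not text from the release notes; the Secret names are placeholders.

```
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: example
spec:
  # Each listed Secret is mounted into the Prometheus Pods. As noted above,
  # changing this list after creation does not update already-running Pods.
  secrets:
  - scrape-target-tls          # assumption: pre-existing Secret names
  - remote-read-credentials
```
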
## 0.7.0 / 2017-03-17

This release introduces breaking changes to the generated StatefulSet's PodTemplate, which cannot be modified for StatefulSets. The Prometheus and Alertmanager objects have to be deleted and recreated for the StatefulSets to be created properly.

* [CHANGE] Use Secrets instead of ConfigMaps for configurations.
* [FEATURE] Allow ConfigMaps containing rules to be selected via label selector.
* [FEATURE] `nodeSelector` added to the Alertmanager kind.
* [ENHANCEMENT] Use Prometheus v2 chunk encoding by default.
* [BUGFIX] Fix Alertmanager cluster mesh initialization.

## 0.6.0 / 2017-02-28

* [FEATURE] Allow not tagging targets with the `namespace` label.
* [FEATURE] Allow specifying `ServiceAccountName` to be used by Prometheus pods.
* [ENHANCEMENT] Label governing services to uniquely identify them.
* [ENHANCEMENT] Reconcile Service and Endpoints objects.
* [ENHANCEMENT] General stability improvements.
* [BUGFIX] Hostname cannot be an FQDN when syncing kubelets into the Endpoints object.

## 0.5.1 / 2017-02-17

* [BUGFIX] Use correct governing `Service` for Prometheus `StatefulSet`.

## 0.5.0 / 2017-02-15

* [FEATURE] Allow synchronizing kubelets into an `Endpoints` object.
* [FEATURE] Allow specifying a custom configmap-reload image.

## 0.4.0 / 2017-02-02

* [CHANGE] Split endpoint and job into separate labels instead of a single concatenated one.
* [BUGFIX] Properly exit on errors communicating with the apiserver.

## 0.3.0 / 2017-01-31

This release introduces breaking changes to the underlying naming schemes. It is recommended to destroy existing Prometheus and Alertmanager objects and recreate them with the new namings.

With this release support for `v1.4.x` clusters is dropped. Changes will not be backported to the `0.1.x` release series anymore.

* [CHANGE] Prefixed StatefulSet namings based on managing resource
* [FEATURE] Pass labels and annotations through to StatefulSets
* [FEATURE] Add TLS config to use for Prometheus target scraping
* [FEATURE] Add configurable `routePrefix` for Prometheus
* [FEATURE] Add node selector to Prometheus TPR
* [ENHANCEMENT] Stability improvements

## 0.2.3 / 2017-01-05

* [BUGFIX] Fix config reloading when using an external URL.

## 0.1.3 / 2017-01-05

The `0.1.x` releases are backport releases with Kubernetes `v1.4.x` compatibility.

* [BUGFIX] Fix config reloading when using an external URL.

## 0.2.2 / 2017-01-03

* [FEATURE] Add ability to set the external URL under which the Prometheus/Alertmanager instances will be available.

## 0.1.2 / 2017-01-03

The `0.1.x` releases are backport releases with Kubernetes `v1.4.x` compatibility.

* [FEATURE] Add ability to set the external URL under which the Prometheus/Alertmanager instances will be available.

## 0.2.1 / 2016-12-23

* [BUGFIX] Fix `subPath` behavior when not using storage provisioning.

## 0.2.0 / 2016-12-20

This release requires a Kubernetes cluster >= 1.5.0. See the README for instructions on how to upgrade if you are currently running the operator on a lower version.

* [CHANGE] Use StatefulSet instead of PetSet.
* [BUGFIX] Fix Prometheus config generation for labels containing "-".

77
vendor/github.com/coreos/prometheus-operator/CONTRIBUTING.md
generated
vendored
Normal file
77
vendor/github.com/coreos/prometheus-operator/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,77 @@
# How to Contribute

CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via GitHub pull requests. This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted.

# Certificate of Origin

By contributing to this project you agree to the Developer Certificate of Origin (DCO). This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. See the [DCO](DCO) file for details.

# Email and Chat

The project currently uses the general CoreOS email list and IRC channel:
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org

Please avoid emailing maintainers found in the MAINTAINERS file directly. They are very busy and read the mailing lists.

## Getting Started

- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bugs, submit patches!

## Contribution Flow

This is a rough outline of what a contributor's workflow looks like:

- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.

Thanks for your contributions!

### Coding Style

CoreOS projects written in Go follow a set of style guidelines that we've documented [here](https://github.com/coreos/docs/tree/master/golang). Please follow them when working on your contributions.

### Format of the Commit Message

We follow a rough convention for commit messages that is designed to answer two questions: what changed and why. The subject line should feature the what and the body of the commit should describe the why.

```
scripts: add the test-cluster command

this uses tmux to setup a test cluster that you can easily kill and
start for debugging.

Fixes #38
```

The format can be described more formally as follows:

```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```

The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This allows the message to be easier to read on GitHub as well as in various git tools.

36
vendor/github.com/coreos/prometheus-operator/DCO
generated
vendored
Normal file
36
vendor/github.com/coreos/prometheus-operator/DCO
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
8
vendor/github.com/coreos/prometheus-operator/Dockerfile
generated
vendored
Normal file
8
vendor/github.com/coreos/prometheus-operator/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
FROM quay.io/prometheus/busybox:latest
|
||||
|
||||
ADD operator /bin/operator
|
||||
|
||||
# On busybox 'nobody' has uid `65534'
|
||||
USER 65534
|
||||
|
||||
ENTRYPOINT ["/bin/operator"]
|
802
vendor/github.com/coreos/prometheus-operator/Gopkg.lock
generated
vendored
Normal file
802
vendor/github.com/coreos/prometheus-operator/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,802 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8e47871087b94913898333f37af26732faaab30cdb41571136cf7aec9921dae7"
|
||||
name = "github.com/PuerkitoBio/purell"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:331a419049c2be691e5ba1d24342fc77c7e767a80c666a18fd8a9f7b82419c1c"
|
||||
name = "github.com/PuerkitoBio/urlesc"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6c169045362a7dfcb016ece49d587c393cde5c9eb964f7e638783a60bad955ec"
|
||||
name = "github.com/ant31/crd-validation"
|
||||
packages = ["pkg"]
|
||||
pruneopts = ""
|
||||
revision = "30f8a35d0ac2d8a2825c78ff47885979e3ee1121"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = ""
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:79421244ba5848aae4b0a5c41e633a04e4894cb0b164a219dc8c15ec7facb7f1"
|
||||
name = "github.com/blang/semver"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
|
||||
version = "v3.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = ""
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bae950b148164e197399891aac35cb829798fab7c3b1519aad8a4d497fbbdd2d"
|
||||
name = "github.com/emicklei/go-restful"
|
||||
packages = [
|
||||
".",
|
||||
"log",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "26b41036311f2da8242db402557a0dbd09dc83da"
|
||||
version = "v2.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
|
||||
name = "github.com/fsnotify/fsnotify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22"
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:44ec1082ba97d89ce860abcc6ee3f0cf24e658d3efb8531b0f0a52f0781e4243"
|
||||
name = "github.com/go-kit/kit"
|
||||
packages = [
|
||||
"log",
|
||||
"log/level",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
|
||||
version = "v0.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8"
|
||||
name = "github.com/go-logfmt/logfmt"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1287439f7765209116509fffff2b8f853845e4b35572b41a1aadda42cbcffcc2"
|
||||
name = "github.com/go-openapi/jsonpointer"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:07ac8ac445f68b0bc063d11845d479fb7e09c906ead7a8c4165b59777df09d74"
|
||||
name = "github.com/go-openapi/jsonreference"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5f0a74fa1ec2b94dea099354efde184e2eaf3f5eeb2ea5e30ba60ef564a93d33"
|
||||
name = "github.com/go-openapi/spec"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "d8000b5bfbd1147255710505a27c735b6b2ae2ac"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cd78635f557f3af547b70c0bd59a4ada288137cc22da142e72950f2a2560e5b3"
|
||||
name = "github.com/go-openapi/swag"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ceb469cb0fdf2d792f28d771bc05da6c606f55e5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9ca737b471693542351e112c9e86be9bf7385e42256893a09ecb2a98e2036f74"
|
||||
name = "github.com/go-stack/stack"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
|
||||
version = "v1.7.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0a3f6a0c68ab8f3d455f8892295503b179e571b7fefe47cc6c556405d1f83411"
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bcb38c8fc9b21bb8682ce2d605a7d4aeb618abc7f827e3ac0b27c0371fdb23fb"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:be28c0531a755f2178acf1e327e6f5a8a3968feb5f2567cdc968064253141751"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2a131706ff80636629ab6373f2944569b8252ecc018cda8040931b05d32e3c16"
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = [
|
||||
"OpenAPIv2",
|
||||
"compiler",
|
||||
"extensions",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:98b38236d3f349e1376d786c1c3d097ab81f93f6850857a95c8ef9ca361f28d6"
|
||||
name = "github.com/hashicorp/go-version"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "4fe82ae3040f80a03d04d2cccb5606a626b8e1ee"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [
|
||||
".",
|
||||
"simplelru",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:23bc0b496ba341c6e3ba24d6358ff4a40a704d9eb5f9a3bd8e8fbd57ad869013"
|
||||
name = "github.com/imdario/mergo"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "163f41321a19dd09362d4c63cc2489db2015f1f4"
|
||||
version = "0.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a28707e37a9cfab71db6df5a0b7a938d171bd7bd27addacf6522b1d7a90c728b"
|
||||
name = "github.com/improbable-eng/thanos"
|
||||
packages = [
|
||||
"pkg/reloader",
|
||||
"pkg/runutil",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "2f5a6a17635815e336457658eb10928e2061c2ed"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc"
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a"
|
||||
name = "github.com/kr/logfmt"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ee97ec8a00b2424570c1ce53d7b410e96fbd4c241b29df134276ff6aa3750335"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
packages = [
|
||||
"diff",
|
||||
"pretty",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d65d576e9348f5982d7f6d83682b694e731a45c6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1f6522b148fa2fb779a962e572511b808e731a50f5f6a2b148672aec15a97873"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = [
|
||||
"buffer",
|
||||
"jlexer",
|
||||
"jwriter",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "517203d186eb343d3df4068565cc0446b450d2c4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4c23ced97a470b17d9ffd788310502a077b9c1f60221a85563e49696276b4147"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
pruneopts = ""
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:0de0f377aeccd41384e883c59c6f184c9db01c96db33a2724a1eaadd60f92629"
|
||||
name = "github.com/mitchellh/hashstructure"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2bca23e0e452137f789efbc8610126fd8b94f73b"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd"
|
||||
name = "github.com/modern-go/concurrent"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
|
||||
version = "1.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:420f9231f816eeca3ff5aab070caac3ed7f27e4d37ded96ce9de3d7a7a2e31ad"
|
||||
name = "github.com/modern-go/reflect2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:94e9081cc450d2cdf4e6886fc2c06c07272f86477df2d74ee5931951fa3d2577"
|
||||
name = "github.com/oklog/run"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
pruneopts = ""
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f"
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = ""
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:087d4ebe9bb26b9dba01fef70156f93874a5be9e47b3e193a5cf566fafee8bb1"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/internal",
|
||||
"prometheus/promhttp",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0a8115f42e037a6e327f9a269a26ff6603fb8472"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:60aca47f4eeeb972f1b9da7e7db51dee15ff6c59f7b401c1588b8e6771ba15ef"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
pruneopts = ""
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:545c0adfcd1c5a3d155cfada997c426d3e4b358ba26cf7a1ace99fd9230e63de"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9015def9b01c5b9f28606fcbe9efcb76657fc20ef1c3102e89d290bff8a6380d"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "780932d4fbbe0e69b84c34c20f5c8d0981e109ea"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c10188d96f7a014299e1a82e0cf5491a02c159fc076f275336f35d796ba3f4d6"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ee5fd03fd6acfd43e44aea0b4135958546ed8e73"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a30066593578732a356dc7e5d7f78d69184ca65aeeff5939241a3ab10559bb06"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:73338180b1dae2db9e5795fc6cd533145e5ef9ce6354aea8437599e15ee2addf"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = ""
|
||||
revision = "80db560fac1fb3e6ac81dbc7f8ae4c061f5257bd"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:58977d25cdcfe647173e2282e77f88797750d8124ad7417174afcaea301d4a6a"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"lex/httplex",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "6078986fec03a1dcc236c34816c71b0e05018fda"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6f4c646a3e6cc4e6871e30a611090361b0153fce20c2bc431b73e178d67f0e78"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "c488ab1dd8481ef762f96a79a9577c27825be697"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
"width",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:55a681cb66f28755765fa5fa5104cbd8dc85c55c02d206f9f89566451e3fe1aa"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
pruneopts = ""
|
||||
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8c432632a230496c35a15cfdf441436f04c90e724ad99c8463ef0c82bbe93edb"
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
"internal",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1"
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e5d1fb981765b6f7513f793a3fcaac7158408cca77f75f7311ac82cc88e9c445"
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5fe876313b07628905b2181e537faabe45032cb9c79c01b49b51c25a0a40040d"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
|
||||
version = "v2.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5f076f6f9c3ac4f2b99d79dc7974eabd3f51be35254aa0d8c4cf920fdb9c7ff8"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admissionregistration/v1alpha1",
|
||||
"admissionregistration/v1beta1",
|
||||
"apps/v1",
|
||||
"apps/v1beta1",
|
||||
"apps/v1beta2",
|
||||
"authentication/v1",
|
||||
"authentication/v1beta1",
|
||||
"authorization/v1",
|
||||
"authorization/v1beta1",
|
||||
"autoscaling/v1",
|
||||
"autoscaling/v2beta1",
|
||||
"autoscaling/v2beta2",
|
||||
"batch/v1",
|
||||
"batch/v1beta1",
|
||||
"batch/v2alpha1",
|
||||
"certificates/v1beta1",
|
||||
"coordination/v1beta1",
|
||||
"core/v1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
"networking/v1",
|
||||
"policy/v1beta1",
|
||||
"rbac/v1",
|
||||
"rbac/v1alpha1",
|
||||
"rbac/v1beta1",
|
||||
"scheduling/v1alpha1",
|
||||
"scheduling/v1beta1",
|
||||
"settings/v1alpha1",
|
||||
"storage/v1",
|
||||
"storage/v1alpha1",
|
||||
"storage/v1beta1",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "fd83cbc87e7632ccd8bbab63d2b673d4e0c631cc"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:466583feeb1602ea9f19fef76e96b55c08c49ce88743a9d38c7726891ffe0436"
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
packages = [
|
||||
"pkg/apis/apiextensions",
|
||||
"pkg/apis/apiextensions/v1beta1",
|
||||
"pkg/client/clientset/clientset",
|
||||
"pkg/client/clientset/clientset/scheme",
|
||||
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1748dfb29e8a4432b78514bc88a1b07937a9805a"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7aa037a4df5432be2820d164f378d7c22335e5cbba124e90e42114757ebd11ac"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/errors",
|
||||
"pkg/api/meta",
|
||||
"pkg/api/resource",
|
||||
"pkg/apis/meta/internalversion",
|
||||
"pkg/apis/meta/v1",
|
||||
"pkg/apis/meta/v1/unstructured",
|
||||
"pkg/apis/meta/v1beta1",
|
||||
"pkg/conversion",
|
||||
"pkg/conversion/queryparams",
|
||||
"pkg/fields",
|
||||
"pkg/labels",
|
||||
"pkg/runtime",
|
||||
"pkg/runtime/schema",
|
||||
"pkg/runtime/serializer",
|
||||
"pkg/runtime/serializer/json",
|
||||
"pkg/runtime/serializer/protobuf",
|
||||
"pkg/runtime/serializer/recognizer",
|
||||
"pkg/runtime/serializer/streaming",
|
||||
"pkg/runtime/serializer/versioning",
|
||||
"pkg/selection",
|
||||
"pkg/types",
|
||||
"pkg/util/cache",
|
||||
"pkg/util/clock",
|
||||
"pkg/util/diff",
|
||||
"pkg/util/errors",
|
||||
"pkg/util/framer",
|
||||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/naming",
|
||||
"pkg/util/net",
|
||||
"pkg/util/runtime",
|
||||
"pkg/util/sets",
|
||||
"pkg/util/validation",
|
||||
"pkg/util/validation/field",
|
||||
"pkg/util/wait",
|
||||
"pkg/util/yaml",
|
||||
"pkg/version",
|
||||
"pkg/watch",
|
||||
"third_party/forked/golang/reflect",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "6dd46049f39503a1fc8d65de4bd566829e95faff"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5d4153d12c3aed2c90a94262520d2498d5afa4d692554af55e65a7c5af0bc399"
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"deprecated-dynamic",
|
||||
"discovery",
|
||||
"dynamic",
|
||||
"kubernetes",
|
||||
"kubernetes/scheme",
|
||||
"kubernetes/typed/admissionregistration/v1alpha1",
|
||||
"kubernetes/typed/admissionregistration/v1beta1",
|
||||
"kubernetes/typed/apps/v1",
|
||||
"kubernetes/typed/apps/v1beta1",
|
||||
"kubernetes/typed/apps/v1beta2",
|
||||
"kubernetes/typed/authentication/v1",
|
||||
"kubernetes/typed/authentication/v1beta1",
|
||||
"kubernetes/typed/authorization/v1",
|
||||
"kubernetes/typed/authorization/v1beta1",
|
||||
"kubernetes/typed/autoscaling/v1",
|
||||
"kubernetes/typed/autoscaling/v2beta1",
|
||||
"kubernetes/typed/autoscaling/v2beta2",
|
||||
"kubernetes/typed/batch/v1",
|
||||
"kubernetes/typed/batch/v1beta1",
|
||||
"kubernetes/typed/batch/v2alpha1",
|
||||
"kubernetes/typed/certificates/v1beta1",
|
||||
"kubernetes/typed/coordination/v1beta1",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
"kubernetes/typed/networking/v1",
|
||||
"kubernetes/typed/policy/v1beta1",
|
||||
"kubernetes/typed/rbac/v1",
|
||||
"kubernetes/typed/rbac/v1alpha1",
|
||||
"kubernetes/typed/rbac/v1beta1",
|
||||
"kubernetes/typed/scheduling/v1alpha1",
|
||||
"kubernetes/typed/scheduling/v1beta1",
|
||||
"kubernetes/typed/settings/v1alpha1",
|
||||
"kubernetes/typed/storage/v1",
|
||||
"kubernetes/typed/storage/v1alpha1",
|
||||
"kubernetes/typed/storage/v1beta1",
|
||||
"pkg/apis/clientauthentication",
|
||||
"pkg/apis/clientauthentication/v1alpha1",
|
||||
"pkg/apis/clientauthentication/v1beta1",
|
||||
"pkg/version",
|
||||
"plugin/pkg/client/auth/exec",
|
||||
"rest",
|
||||
"rest/watch",
|
||||
"tools/auth",
|
||||
"tools/cache",
|
||||
"tools/clientcmd",
|
||||
"tools/clientcmd/api",
|
||||
"tools/clientcmd/api/latest",
|
||||
"tools/clientcmd/api/v1",
|
||||
"tools/metrics",
|
||||
"tools/pager",
|
||||
"tools/reference",
|
||||
"transport",
|
||||
"util/buffer",
|
||||
"util/cert",
|
||||
"util/connrotation",
|
||||
"util/flowcontrol",
|
||||
"util/homedir",
|
||||
"util/integer",
|
||||
"util/retry",
|
||||
"util/workqueue",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1638f8970cefaa404ff3a62950f88b08292b2696"
|
||||
version = "v9.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a2f78b8fd86be41f2aa77404245aed4f4f410ac3aabc5f3bd9bd1fcc09076c53"
|
||||
name = "k8s.io/kube-openapi"
|
||||
packages = ["pkg/common"]
|
||||
pruneopts = ""
|
||||
revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/ant31/crd-validation/pkg",
|
||||
"github.com/blang/semver",
|
||||
"github.com/ghodss/yaml",
|
||||
"github.com/go-kit/kit/log",
|
||||
"github.com/go-kit/kit/log/level",
|
||||
"github.com/go-openapi/spec",
|
||||
"github.com/hashicorp/go-version",
|
||||
"github.com/improbable-eng/thanos/pkg/reloader",
|
||||
"github.com/kylelemons/godebug/pretty",
|
||||
"github.com/mitchellh/hashstructure",
|
||||
"github.com/oklog/run",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"github.com/stretchr/testify/require",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"gopkg.in/alecthomas/kingpin.v2",
|
||||
"gopkg.in/yaml.v2",
|
||||
"k8s.io/api/apps/v1beta2",
|
||||
"k8s.io/api/core/v1",
|
||||
"k8s.io/api/extensions/v1beta1",
|
||||
"k8s.io/api/rbac/v1",
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
|
||||
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset",
|
||||
"k8s.io/apimachinery/pkg/api/errors",
|
||||
"k8s.io/apimachinery/pkg/api/meta",
|
||||
"k8s.io/apimachinery/pkg/api/resource",
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
|
||||
"k8s.io/apimachinery/pkg/fields",
|
||||
"k8s.io/apimachinery/pkg/labels",
|
||||
"k8s.io/apimachinery/pkg/runtime",
|
||||
"k8s.io/apimachinery/pkg/runtime/schema",
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer",
|
||||
"k8s.io/apimachinery/pkg/util/intstr",
|
||||
"k8s.io/apimachinery/pkg/util/runtime",
|
||||
"k8s.io/apimachinery/pkg/util/validation",
|
||||
"k8s.io/apimachinery/pkg/util/wait",
|
||||
"k8s.io/apimachinery/pkg/util/yaml",
|
||||
"k8s.io/apimachinery/pkg/watch",
|
||||
"k8s.io/client-go/deprecated-dynamic",
|
||||
"k8s.io/client-go/discovery",
|
||||
"k8s.io/client-go/kubernetes",
|
||||
"k8s.io/client-go/kubernetes/scheme",
|
||||
"k8s.io/client-go/kubernetes/typed/core/v1",
|
||||
"k8s.io/client-go/rest",
|
||||
"k8s.io/client-go/tools/cache",
|
||||
"k8s.io/client-go/tools/clientcmd",
|
||||
"k8s.io/client-go/util/flowcontrol",
|
||||
"k8s.io/client-go/util/workqueue",
|
||||
"k8s.io/kube-openapi/pkg/common",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
73
vendor/github.com/coreos/prometheus-operator/Gopkg.toml
generated
vendored
Normal file
73
vendor/github.com/coreos/prometheus-operator/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ant31/crd-validation"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/blang/semver"
|
||||
version = "3.5.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/spec"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.12.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "=v9.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kube-openapi"
|
||||
revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/ericchiang/k8s"
|
||||
version = "v0.4.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/json-iterator/go"
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/hashstructure"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_golang"
|
202
vendor/github.com/coreos/prometheus-operator/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/prometheus-operator/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
235
vendor/github.com/coreos/prometheus-operator/Makefile
generated
vendored
Normal file
235
vendor/github.com/coreos/prometheus-operator/Makefile
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
||||
SHELL=/bin/bash -o pipefail
|
||||
|
||||
REPO?=quay.io/coreos/prometheus-operator
|
||||
REPO_PROMETHEUS_CONFIG_RELOADER?=quay.io/coreos/prometheus-config-reloader
|
||||
TAG?=$(shell git rev-parse --short HEAD)
|
||||
|
||||
PO_CRDGEN_BINARY:=$(GOPATH)/bin/po-crdgen
|
||||
OPENAPI_GEN_BINARY:=$(GOPATH)/bin/openapi-gen
|
||||
DEEPCOPY_GEN_BINARY:=$(GOPATH)/bin/deepcopy-gen
|
||||
GOJSONTOYAML_BINARY:=$(GOPATH)/bin/gojsontoyaml
|
||||
JB_BINARY:=$(GOPATH)/bin/jb
|
||||
PO_DOCGEN_BINARY:=$(GOPATH)/bin/po-docgen
|
||||
EMBEDMD_BINARY:=$(GOPATH)/bin/embedmd
|
||||
|
||||
GOLANG_FILES:=$(shell find . -name \*.go -print)
|
||||
pkgs = $(shell go list ./... | grep -v /vendor/ | grep -v /test/)
|
||||
|
||||
|
||||
.PHONY: all
|
||||
all: format generate build test
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
# Remove all files and directories ignored by git.
|
||||
git clean -Xfd .
|
||||
|
||||
|
||||
############
|
||||
# Building #
|
||||
############
|
||||
|
||||
.PHONY: build
|
||||
build: operator prometheus-config-reloader
|
||||
|
||||
operator: $(GOLANG_FILES)
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build \
|
||||
-ldflags "-X github.com/coreos/prometheus-operator/pkg/version.Version=$(shell cat VERSION)" \
|
||||
-o $@ cmd/operator/main.go
|
||||
|
||||
prometheus-config-reloader:
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build \
|
||||
-ldflags "-X github.com/coreos/prometheus-operator/pkg/version.Version=$(shell cat VERSION)" \
|
||||
-o $@ cmd/$@/main.go
|
||||
|
||||
pkg/client/monitoring/v1/zz_generated.deepcopy.go: .header pkg/client/monitoring/v1/types.go $(DEEPCOPY_GEN_BINARY)
|
||||
$(DEEPCOPY_GEN_BINARY) \
|
||||
-i github.com/coreos/prometheus-operator/pkg/client/monitoring/v1 \
|
||||
--go-header-file="$(GOPATH)/src/github.com/coreos/prometheus-operator/.header" \
|
||||
-v=4 \
|
||||
--logtostderr \
|
||||
--bounding-dirs "github.com/coreos/prometheus-operator/pkg/client" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
go fmt pkg/client/monitoring/v1/zz_generated.deepcopy.go
|
||||
|
||||
pkg/client/monitoring/v1alpha1/zz_generated.deepcopy.go: $(DEEPCOPY_GEN_BINARY)
|
||||
$(DEEPCOPY_GEN_BINARY) \
|
||||
-i github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1 \
|
||||
--go-header-file="$(GOPATH)/src/github.com/coreos/prometheus-operator/.header" \
|
||||
-v=4 \
|
||||
--logtostderr \
|
||||
--bounding-dirs "github.com/coreos/prometheus-operator/pkg/client" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
go fmt pkg/client/monitoring/v1alpha1/zz_generated.deepcopy.go
|
||||
|
||||
.PHONY: image
|
||||
image: hack/operator-image hack/prometheus-config-reloader-image
|
||||
|
||||
hack/operator-image: Dockerfile operator
|
||||
# Create empty target file, for the sole purpose of recording when this target
|
||||
# was last executed via the last-modification timestamp on the file. See
|
||||
# https://www.gnu.org/software/make/manual/make.html#Empty-Targets
|
||||
docker build -t $(REPO):$(TAG) .
|
||||
touch $@
|
||||
|
||||
hack/prometheus-config-reloader-image: cmd/prometheus-config-reloader/Dockerfile prometheus-config-reloader
|
||||
# Create empty target file, for the sole purpose of recording when this target
|
||||
# was last executed via the last-modification timestamp on the file. See
|
||||
# https://www.gnu.org/software/make/manual/make.html#Empty-Targets
|
||||
docker build -t $(REPO_PROMETHEUS_CONFIG_RELOADER):$(TAG) -f cmd/prometheus-config-reloader/Dockerfile .
|
||||
touch $@
|
||||
|
||||
|
||||
##############
|
||||
# Generating #
|
||||
##############
|
||||
|
||||
.PHONY: generate
|
||||
generate: pkg/client/monitoring/v1/zz_generated.deepcopy.go pkg/client/monitoring/v1/openapi_generated.go $(shell find jsonnet/prometheus-operator/*-crd.libsonnet -type f) bundle.yaml kube-prometheus $(shell find Documentation -type f)
|
||||
|
||||
.PHONY: generate-in-docker
|
||||
generate-in-docker: hack/jsonnet-docker-image
|
||||
hack/generate-in-docker.sh $(MFLAGS) # MFLAGS are the parent make call's flags
|
||||
|
||||
.PHONY: kube-prometheus
|
||||
kube-prometheus:
|
||||
cd contrib/kube-prometheus && $(MAKE) $(MFLAGS) generate
|
||||
|
||||
example/prometheus-operator-crd/**.crd.yaml: pkg/client/monitoring/v1/openapi_generated.go $(PO_CRDGEN_BINARY)
|
||||
po-crdgen prometheus > example/prometheus-operator-crd/prometheus.crd.yaml
|
||||
po-crdgen alertmanager > example/prometheus-operator-crd/alertmanager.crd.yaml
|
||||
po-crdgen servicemonitor > example/prometheus-operator-crd/servicemonitor.crd.yaml
|
||||
po-crdgen prometheusrule > example/prometheus-operator-crd/prometheusrule.crd.yaml
|
||||
|
||||
jsonnet/prometheus-operator/**-crd.libsonnet: $(shell find example/prometheus-operator-crd/*.crd.yaml -type f) $(GOJSONTOYAML_BINARY)
|
||||
cat example/prometheus-operator-crd/alertmanager.crd.yaml | gojsontoyaml -yamltojson > jsonnet/prometheus-operator/alertmanager-crd.libsonnet
|
||||
cat example/prometheus-operator-crd/prometheus.crd.yaml | gojsontoyaml -yamltojson > jsonnet/prometheus-operator/prometheus-crd.libsonnet
|
||||
cat example/prometheus-operator-crd/servicemonitor.crd.yaml | gojsontoyaml -yamltojson > jsonnet/prometheus-operator/servicemonitor-crd.libsonnet
|
||||
cat example/prometheus-operator-crd/prometheusrule.crd.yaml | gojsontoyaml -yamltojson > jsonnet/prometheus-operator/prometheusrule-crd.libsonnet
|
||||
|
||||
pkg/client/monitoring/v1/openapi_generated.go: pkg/client/monitoring/v1/types.go $(OPENAPI_GEN_BINARY)
|
||||
$(OPENAPI_GEN_BINARY) \
|
||||
-i github.com/coreos/prometheus-operator/pkg/client/monitoring/v1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1 \
|
||||
-p github.com/coreos/prometheus-operator/pkg/client/monitoring/v1 \
|
||||
--go-header-file="$(GOPATH)/src/github.com/coreos/prometheus-operator/.header"
|
||||
go fmt pkg/client/monitoring/v1/openapi_generated.go
|
||||
|
||||
bundle.yaml: $(shell find example/rbac/prometheus-operator/*.yaml -type f)
|
||||
hack/generate-bundle.sh
|
||||
|
||||
hack/generate/vendor: $(JB_BINARY) $(shell find jsonnet/prometheus-operator -type f)
|
||||
cd hack/generate; $(JB_BINARY) install;
|
||||
|
||||
example/non-rbac/prometheus-operator.yaml: hack/generate/vendor hack/generate/prometheus-operator-non-rbac.jsonnet $(shell find jsonnet -type f)
|
||||
hack/generate/build-non-rbac-prometheus-operator.sh
|
||||
|
||||
RBAC_MANIFESTS = example/rbac/prometheus-operator/prometheus-operator-cluster-role.yaml example/rbac/prometheus-operator/prometheus-operator-cluster-role-binding.yaml example/rbac/prometheus-operator/prometheus-operator-service-account.yaml example/rbac/prometheus-operator/prometheus-operator-deployment.yaml
|
||||
$(RBAC_MANIFESTS): hack/generate/vendor hack/generate/prometheus-operator-rbac.jsonnet $(shell find jsonnet -type f)
|
||||
hack/generate/build-rbac-prometheus-operator.sh
|
||||
|
||||
jsonnet/prometheus-operator/prometheus-operator.libsonnet: VERSION
|
||||
sed -i \
|
||||
"s/prometheusOperator: 'v.*',/prometheusOperator: 'v$(shell cat VERSION)',/" \
|
||||
jsonnet/prometheus-operator/prometheus-operator.libsonnet;
|
||||
|
||||
FULLY_GENERATED_DOCS = Documentation/api.md Documentation/compatibility.md
|
||||
TO_BE_EXTENDED_DOCS = $(filter-out $(FULLY_GENERATED_DOCS), $(wildcard Documentation/*.md))
|
||||
|
||||
Documentation/api.md: $(PO_DOCGEN_BINARY) pkg/client/monitoring/v1/types.go
|
||||
$(PO_DOCGEN_BINARY) api pkg/client/monitoring/v1/types.go > $@
|
||||
|
||||
Documentation/compatibility.md: $(PO_DOCGEN_BINARY) pkg/prometheus/statefulset.go
|
||||
$(PO_DOCGEN_BINARY) compatibility > $@
|
||||
|
||||
$(TO_BE_EXTENDED_DOCS): $(EMBEDMD_BINARY) $(shell find example) kube-prometheus
|
||||
$(EMBEDMD_BINARY) -w `find Documentation -name "*.md" | grep -v vendor`
|
||||
|
||||
|
||||
##############
|
||||
# Formatting #
|
||||
##############
|
||||
|
||||
.PHONY: format
|
||||
format: go-fmt check-license shellcheck
|
||||
|
||||
.PHONY: go-fmt
|
||||
go-fmt:
|
||||
go fmt $(pkgs)
|
||||
|
||||
.PHONY: check-license
|
||||
check-license:
|
||||
./scripts/check_license.sh
|
||||
|
||||
.PHONY: shellcheck
|
||||
shellcheck:
|
||||
docker run -v "${PWD}:/mnt" koalaman/shellcheck:stable $(shell find . -type f -name "*.sh" -not -path "*vendor*")
|
||||
|
||||
|
||||
###########
|
||||
# Testing #
|
||||
###########
|
||||
|
||||
.PHONY: test
|
||||
test: test-unit test-e2e
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit:
|
||||
@go test $(TEST_RUN_ARGS) -short $(pkgs)
|
||||
|
||||
.PHONY: test-e2e
|
||||
test-e2e: KUBECONFIG?=$(HOME)/.kube/config
|
||||
test-e2e:
|
||||
go test -timeout 55m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig=$(KUBECONFIG) --operator-image=$(REPO):$(TAG)
|
||||
|
||||
.PHONY: test-e2e-helm
|
||||
test-e2e-helm:
|
||||
./helm/hack/e2e-test.sh
|
||||
# package the charts and verify that they have the version bumped
|
||||
helm/hack/helm-package.sh "alertmanager grafana prometheus prometheus-operator exporter-kube-dns exporter-kube-scheduler exporter-kubelets exporter-node exporter-kube-controller-manager exporter-kube-etcd exporter-kube-state exporter-kubernetes exporter-coredns"
|
||||
helm/hack/sync-repo.sh false
|
||||
|
||||
|
||||
########
|
||||
# Misc #
|
||||
########
|
||||
|
||||
hack/jsonnet-docker-image: scripts/jsonnet/Dockerfile
|
||||
docker build -f scripts/jsonnet/Dockerfile -t po-jsonnet .
|
||||
touch $@
|
||||
|
||||
.PHONY: helm-sync-s3
|
||||
helm-sync-s3:
|
||||
helm/hack/helm-package.sh "alertmanager grafana prometheus prometheus-operator exporter-kube-dns exporter-kube-scheduler exporter-kubelets exporter-node exporter-kube-controller-manager exporter-kube-etcd exporter-kube-state exporter-kubernetes exporter-coredns"
|
||||
helm/hack/sync-repo.sh true
|
||||
helm/hack/helm-package.sh kube-prometheus
|
||||
helm/hack/sync-repo.sh true
|
||||
|
||||
|
||||
############
|
||||
# Binaries #
|
||||
############
|
||||
|
||||
$(EMBEDMD_BINARY):
|
||||
@go get github.com/campoy/embedmd
|
||||
|
||||
$(JB_BINARY):
|
||||
go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
|
||||
|
||||
$(PO_CRDGEN_BINARY): cmd/po-crdgen/main.go pkg/client/monitoring/v1/openapi_generated.go
|
||||
go install github.com/coreos/prometheus-operator/cmd/po-crdgen
|
||||
|
||||
$(PO_DOCGEN_BINARY): $(shell find cmd/po-docgen -type f) pkg/client/monitoring/v1/types.go
|
||||
go install github.com/coreos/prometheus-operator/cmd/po-docgen
|
||||
|
||||
$(OPENAPI_GEN_BINARY):
|
||||
go get -u -v -d k8s.io/code-generator/cmd/openapi-gen
|
||||
cd $(GOPATH)/src/k8s.io/code-generator; git checkout release-1.11
|
||||
go install k8s.io/code-generator/cmd/openapi-gen
|
||||
|
||||
$(DEEPCOPY_GEN_BINARY):
|
||||
go get -u -v -d k8s.io/code-generator/cmd/deepcopy-gen
|
||||
cd $(GOPATH)/src/k8s.io/code-generator; git checkout release-1.11
|
||||
go install k8s.io/code-generator/cmd/deepcopy-gen
|
||||
|
||||
$(GOJSONTOYAML_BINARY):
|
||||
go get -u github.com/brancz/gojsontoyaml
|
vendor/github.com/coreos/prometheus-operator/NOTICE (generated, vendored, normal file, 5 lines)
@ -0,0 +1,5 @@
|
||||
CoreOS Project
|
||||
Copyright 2015 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
vendor/github.com/coreos/prometheus-operator/OWNERS (generated, vendored, normal file, 12 lines)
@ -0,0 +1,12 @@
|
||||
reviewers:
|
||||
- brancz
|
||||
- metalmatze
|
||||
- mxinden
|
||||
- s-urbaniak
|
||||
- squat
|
||||
approvers:
|
||||
- brancz
|
||||
- metalmatze
|
||||
- mxinden
|
||||
- s-urbaniak
|
||||
- squat
|
vendor/github.com/coreos/prometheus-operator/README.md (generated, vendored, normal file, 154 lines)
@ -0,0 +1,154 @@
|
||||
# Prometheus Operator
|
||||
[](https://travis-ci.org/coreos/prometheus-operator)
|
||||
[](https://goreportcard.com/report/coreos/prometheus-operator)
|
||||
|
||||
**Project status: *beta***. Not all planned features are completed. The API, spec, status and other user-facing objects may change, but in a backward compatible way.
|
||||
|
||||
The Prometheus Operator for Kubernetes provides easy monitoring definitions for Kubernetes
|
||||
services, as well as deployment and management of Prometheus instances.
|
||||
|
||||
Once installed, the Prometheus Operator provides the following features:
|
||||
|
||||
* **Create/Destroy**: Easily launch a Prometheus instance for your Kubernetes namespace,
|
||||
a specific application, or a team, using the Operator.
|
||||
|
||||
* **Simple Configuration**: Configure the fundamentals of Prometheus like versions, persistence,
|
||||
retention policies, and replicas from a native Kubernetes resource.
|
||||
|
||||
* **Target Services via Labels**: Automatically generate monitoring target configurations based
|
||||
on familiar Kubernetes label queries; no need to learn a Prometheus specific configuration language.
|
||||
|
||||
For an introduction to the Prometheus Operator, see the initial [blog
|
||||
post](https://coreos.com/blog/the-prometheus-operator.html).
|
||||
|
||||
## Prometheus Operator vs. kube-prometheus
|
||||
|
||||
The Prometheus Operator makes the Prometheus configuration Kubernetes native
|
||||
and manages and operates Prometheus and Alertmanager clusters. It is a piece of
|
||||
the puzzle regarding full end-to-end monitoring.
|
||||
|
||||
[kube-prometheus](contrib/kube-prometheus) combines the Prometheus Operator
|
||||
with a collection of manifests to help you get started with monitoring
|
||||
Kubernetes itself and applications running on top of it.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Version `>=0.18.0` of the Prometheus Operator requires a Kubernetes
|
||||
cluster of version `>=1.8.0`. If you are just starting out with the
|
||||
Prometheus Operator, it is highly recommended to use the latest version.
|
||||
|
||||
If you have an older version of Kubernetes and the Prometheus Operator running,
|
||||
we recommend upgrading Kubernetes first and then the Prometheus Operator.
|
||||
|
||||
## CustomResourceDefinitions
|
||||
|
||||
The Operator acts on the following [custom resource definitions (CRDs)](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/):
|
||||
|
||||
* **`Prometheus`**, which defines a desired Prometheus deployment.
|
||||
The Operator ensures at all times that a deployment matching the resource definition is running.
|
||||
|
||||
* **`ServiceMonitor`**, which declaratively specifies how groups
|
||||
of services should be monitored. The Operator automatically generates Prometheus scrape configuration
|
||||
based on the definition.
|
||||
|
||||
* **`PrometheusRule`**, which defines a desired Prometheus rule file, which can
|
||||
be loaded by a Prometheus instance containing Prometheus alerting and
|
||||
recording rules.
|
||||
|
||||
* **`Alertmanager`**, which defines a desired Alertmanager deployment.
|
||||
The Operator ensures at all times that a deployment matching the resource definition is running.
|
||||
|
||||
To learn more about the CRDs introduced by the Prometheus Operator, have a look
|
||||
at the [design doc](Documentation/design.md).
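As a rough illustration of the `ServiceMonitor` CRD described above, the following hedged sketch builds such an object in Go via `unstructured.Unstructured`; the field names follow the `monitoring.coreos.com/v1` schema, while the object name, namespace, label values, and port name are placeholders:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A ServiceMonitor that selects Services labeled app=example and scrapes
	// their port named "web". Names and labels are illustrative placeholders.
	sm := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "monitoring.coreos.com/v1",
		"kind":       "ServiceMonitor",
		"metadata": map[string]interface{}{
			"name":      "example-app",
			"namespace": "default",
		},
		"spec": map[string]interface{}{
			"selector": map[string]interface{}{
				"matchLabels": map[string]interface{}{"app": "example"},
			},
			"endpoints": []interface{}{
				map[string]interface{}{"port": "web"},
			},
		},
	}}
	fmt.Println(sm.GetKind(), sm.GetName())
}
```

In practice the same object is usually written as a YAML manifest and applied with `kubectl apply`.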
|
||||
|
||||
## Installation
|
||||
|
||||
Install the Operator inside a cluster by running the following command:
|
||||
|
||||
```sh
|
||||
kubectl apply -f bundle.yaml
|
||||
```
|
||||
|
||||
> Note: make sure to adapt the namespace in the ClusterRoleBinding if deploying in a namespace other than the default namespace.
|
||||
|
||||
To run the Operator outside of a cluster:
|
||||
|
||||
```sh
|
||||
make
|
||||
hack/run-external.sh <kubectl cluster name>
|
||||
```
|
||||
|
||||
## Removal
|
||||
|
||||
To remove the operator and Prometheus, first delete any custom resources you created in each namespace. The
|
||||
operator will automatically shut down and remove Prometheus and Alertmanager pods, and associated configmaps.
|
||||
|
||||
```sh
|
||||
for n in $(kubectl get namespaces -o jsonpath={..metadata.name}); do
|
||||
kubectl delete --all --namespace=$n prometheus,servicemonitor,alertmanager
|
||||
done
|
||||
```
|
||||
|
||||
After a couple of minutes you can go ahead and remove the operator itself.
|
||||
|
||||
```sh
|
||||
kubectl delete -f bundle.yaml
|
||||
```
|
||||
|
||||
The operator automatically creates services in each namespace where you created Prometheus or Alertmanager resources,
|
||||
and defines three custom resource definitions. You can clean these up now.
|
||||
|
||||
```sh
|
||||
for n in $(kubectl get namespaces -o jsonpath={..metadata.name}); do
|
||||
kubectl delete --ignore-not-found --namespace=$n service prometheus-operated alertmanager-operated
|
||||
done
|
||||
|
||||
kubectl delete --ignore-not-found customresourcedefinitions \
|
||||
prometheuses.monitoring.coreos.com \
|
||||
servicemonitors.monitoring.coreos.com \
|
||||
alertmanagers.monitoring.coreos.com
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- golang environment
|
||||
- docker (used for creating container images, etc.)
|
||||
- minikube (optional)
|
||||
|
||||
### Testing
|
||||
|
||||
> Ensure that you're running tests in the following path:
|
||||
> `$GOPATH/src/github.com/coreos/prometheus-operator` as tests expect paths to
|
||||
> match. If you're working from a fork, just add the forked repo as a remote and
|
||||
> pull against your local coreos checkout before running tests.
|
||||
|
||||
#### Running *unit tests*:
|
||||
|
||||
`make test-unit`
|
||||
|
||||
#### Running *end-to-end* tests on local minikube cluster:
|
||||
|
||||
1. `minikube start --kubernetes-version=v1.10.0 --memory=4096
|
||||
--extra-config=apiserver.Authorization.Mode=RBAC`
|
||||
2. `eval $(minikube docker-env) && make image` - build Prometheus Operator
|
||||
docker image on minikube's docker
|
||||
3. `make test-e2e`
|
||||
|
||||
## Contributing
|
||||
|
||||
Many files (documentation, manifests, ...) in this repository are
|
||||
auto-generated. For example, `bundle.yaml` originates from the _Jsonnet_ files in
|
||||
`/jsonnet/prometheus-operator`. Before proposing a pull request:
|
||||
|
||||
1. Commit your changes.
|
||||
2. Run `make generate-in-docker`.
|
||||
3. Commit the generated changes.
|
||||
|
||||
|
||||
## Security
|
||||
|
||||
If you find a security vulnerability related to the Prometheus Operator, please
|
||||
do not report it by opening a GitHub issue, but instead please send an e-mail to
|
||||
the maintainers of the project found in the [OWNERS](OWNERS) file.
|
vendor/github.com/coreos/prometheus-operator/RELEASE.md (generated, vendored, normal file, 61 lines)
@ -0,0 +1,61 @@
|
||||
# How to cut a new release
|
||||
|
||||
> This guide is strongly based on the [Prometheus release instructions](https://github.com/prometheus/prometheus/wiki/HOWTO-cut-a-new-release).
|
||||
|
||||
## Branch management and versioning strategy
|
||||
|
||||
We use [Semantic Versioning](http://semver.org/).
|
||||
|
||||
We maintain a separate branch for each minor release, named `release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`.
|
||||
|
||||
The usual flow is to merge new features and changes into the master branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into master from the latest release branch. The master branch should always contain all commits from the latest release branch.
|
||||
|
||||
If a bug fix got accidentally merged into master, cherry-pick commits have to be created in the latest release branch, which then have to be merged back into master. Try to avoid that situation.
|
||||
|
||||
Maintaining the release branches for older minor releases happens on a best effort basis.
|
||||
|
||||
## Prepare your release
|
||||
|
||||
For a patch release, work in the branch of the minor release you want to patch.
|
||||
|
||||
For a new major or minor release, create the corresponding release branch based on the master branch.
|
||||
|
||||
Bump the version in the `VERSION` file in the root of the repository. Once that's done, a number of files have to be re-generated; this is automated with the following make target:
|
||||
|
||||
```bash
|
||||
$ make generate
|
||||
```
|
||||
|
||||
Now that all version information has been updated, an entry for the new version can be added to the `CHANGELOG.md` file.
|
||||
|
||||
Entries in the `CHANGELOG.md` are meant to be in this order:
|
||||
|
||||
* `[CHANGE]`
|
||||
* `[FEATURE]`
|
||||
* `[ENHANCEMENT]`
|
||||
* `[BUGFIX]`
|
||||
|
||||
Create a PR for the version and changelog changes to be reviewed.
|
||||
|
||||
## Draft the new release
|
||||
|
||||
Once the PR for the new release has been merged, make sure there is a release branch for the respective release. For new minor releases, create the `release-<major>.<minor>` branch; for patch releases, merge the master branch into the existing release branch. Should the release be a patch release for an older minor release, cherry-pick the respective changes.
|
||||
|
||||
Push the new or updated release branch to the upstream repository.
|
||||
|
||||
Tag the new release with a tag named `v<major>.<minor>.<patch>`, e.g. `v2.1.3`. Note the `v` prefix.
|
||||
|
||||
You can do the tagging on the commandline:
|
||||
|
||||
```bash
|
||||
$ tag=$(< VERSION) && git tag -s "v${tag}" -m "v${tag}"
|
||||
$ git push --tags
|
||||
```
|
||||
|
||||
A tag signed with a GPG key is appreciated, but in case you can't add a GPG key to your GitHub account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag with the `-a` flag of the `git tag` command to only annotate the tag without signing it.
|
||||
|
||||
Our CI pipeline will automatically push a new docker image to quay.io.
|
||||
|
||||
Go to https://github.com/coreos/prometheus-operator/releases/new, associate the new release with the previously pushed tag, paste in the changes made to `CHANGELOG.md`, and click "Publish release".
|
||||
|
||||
Take a breath. You're done releasing.
|
vendor/github.com/coreos/prometheus-operator/VERSION (generated, vendored, normal file, 1 line)
@ -0,0 +1 @@
|
||||
0.25.0
|
vendor/github.com/coreos/prometheus-operator/bundle.yaml (generated, vendored, normal file, 130 lines)
@ -0,0 +1,130 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus-operator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus-operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus-operator
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus-operator
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- monitoring.coreos.com
|
||||
resources:
|
||||
- alertmanagers
|
||||
- prometheuses
|
||||
- prometheuses/finalizers
|
||||
- alertmanagers/finalizers
|
||||
- servicemonitors
|
||||
- prometheusrules
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: prometheus-operator
|
||||
name: prometheus-operator
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: prometheus-operator
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: prometheus-operator
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --kubelet-service=kube-system/kubelet
|
||||
- --logtostderr=true
|
||||
- --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1
|
||||
- --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.25.0
|
||||
image: quay.io/coreos/prometheus-operator:v0.25.0
|
||||
name: prometheus-operator
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
serviceAccountName: prometheus-operator
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus-operator
|
||||
namespace: default
|
vendor/github.com/coreos/prometheus-operator/code-of-conduct.md (generated, vendored, normal file, 61 lines)
@ -0,0 +1,61 @@
|
||||
## CoreOS Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
fostering an open and welcoming community, we pledge to respect all people who
|
||||
contribute through reporting issues, posting feature requests, updating
|
||||
documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free
|
||||
experience for everyone, regardless of level of experience, gender, gender
|
||||
identity and expression, sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently applying these
|
||||
principles to every aspect of managing this project. Project maintainers who do
|
||||
not follow or enforce the Code of Conduct may be permanently removed from the
|
||||
project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting a project maintainer, Brandon Philips
|
||||
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### CoreOS Events Code of Conduct
|
||||
|
||||
CoreOS events are working conferences intended for professional networking and
|
||||
collaboration in the CoreOS community. Attendees are expected to behave
|
||||
according to professional standards and in accordance with their employer’s
|
||||
policies on appropriate workplace behavior.
|
||||
|
||||
While at CoreOS events or related social networking opportunities, attendees
|
||||
should not engage in discriminatory or offensive speech or actions including
|
||||
but not limited to gender, sexuality, race, age, disability, or religion.
|
||||
Speakers should be especially aware of these concerns.
|
||||
|
||||
CoreOS does not condone any statements by speakers contrary to these standards.
|
||||
CoreOS reserves the right to deny entrance and/or eject from an event (without
|
||||
refund) any individual found to be engaging in discriminatory or offensive
|
||||
speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of designated on-site
|
||||
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/alertmanager.go (generated, vendored, normal file, 216 lines)
@ -0,0 +1,216 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
dynamic "k8s.io/client-go/deprecated-dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
AlertmanagersKind = "Alertmanager"
|
||||
AlertmanagerName = "alertmanagers"
|
||||
)
|
||||
|
||||
type AlertmanagersGetter interface {
|
||||
Alertmanagers(namespace string) AlertmanagerInterface
|
||||
}
|
||||
|
||||
var _ AlertmanagerInterface = &alertmanagers{}
|
||||
|
||||
type AlertmanagerInterface interface {
|
||||
Create(*Alertmanager) (*Alertmanager, error)
|
||||
Get(name string, opts metav1.GetOptions) (*Alertmanager, error)
|
||||
Update(*Alertmanager) (*Alertmanager, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
List(opts metav1.ListOptions) (runtime.Object, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error
|
||||
}
|
||||
|
||||
type alertmanagers struct {
|
||||
restClient rest.Interface
|
||||
client dynamic.ResourceInterface
|
||||
crdKind CrdKind
|
||||
ns string
|
||||
}
|
||||
|
||||
func newAlertmanagers(r rest.Interface, c *dynamic.Client, crdKind CrdKind, namespace string) *alertmanagers {
|
||||
return &alertmanagers{
|
||||
restClient: r,
|
||||
client: c.Resource(
|
||||
&metav1.APIResource{
|
||||
Kind: crdKind.Kind,
|
||||
Name: crdKind.Plural,
|
||||
Namespaced: true,
|
||||
},
|
||||
namespace,
|
||||
),
|
||||
crdKind: crdKind,
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *alertmanagers) Create(o *Alertmanager) (*Alertmanager, error) {
|
||||
ua, err := UnstructuredFromAlertmanager(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ua, err = a.client.Create(ua)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return AlertmanagerFromUnstructured(ua)
|
||||
}
|
||||
|
||||
func (a *alertmanagers) Get(name string, opts metav1.GetOptions) (*Alertmanager, error) {
|
||||
obj, err := a.client.Get(name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return AlertmanagerFromUnstructured(obj)
|
||||
}
|
||||
|
||||
func (a *alertmanagers) Update(o *Alertmanager) (*Alertmanager, error) {
|
||||
ua, err := UnstructuredFromAlertmanager(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cura, err := a.Get(o.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get current version for update")
|
||||
}
|
||||
ua.SetResourceVersion(cura.ObjectMeta.ResourceVersion)
|
||||
|
||||
ua, err = a.client.Update(ua)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return AlertmanagerFromUnstructured(ua)
|
||||
}
|
||||
|
||||
func (a *alertmanagers) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return a.client.Delete(name, options)
|
||||
}
|
||||
|
||||
func (a *alertmanagers) List(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
req := a.restClient.Get().
|
||||
Namespace(a.ns).
|
||||
Resource(a.crdKind.Plural)
|
||||
|
||||
b, err := req.DoRaw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var p AlertmanagerList
|
||||
return &p, json.Unmarshal(b, &p)
|
||||
}
|
||||
|
||||
func (a *alertmanagers) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
r, err := a.restClient.Get().
|
||||
Prefix("watch").
|
||||
Namespace(a.ns).
|
||||
Resource(a.crdKind.Plural).
|
||||
Stream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return watch.NewStreamWatcher(&alertmanagerDecoder{
|
||||
dec: json.NewDecoder(r),
|
||||
close: r.Close,
|
||||
}), nil
|
||||
|
||||
}
|
||||
|
||||
func (a *alertmanagers) DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error {
|
||||
return a.client.DeleteCollection(dopts, lopts)
|
||||
}
|
||||
|
||||
// AlertmanagerFromUnstructured unmarshals an Alertmanager object from dynamic client's unstructured
|
||||
func AlertmanagerFromUnstructured(r *unstructured.Unstructured) (*Alertmanager, error) {
|
||||
b, err := json.Marshal(r.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var a Alertmanager
|
||||
if err := json.Unmarshal(b, &a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.TypeMeta.Kind = AlertmanagersKind
|
||||
a.TypeMeta.APIVersion = Group + "/" + Version
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
// UnstructuredFromAlertmanager marshals an Alertmanager object into dynamic client's unstructured
|
||||
func UnstructuredFromAlertmanager(a *Alertmanager) (*unstructured.Unstructured, error) {
|
||||
a.TypeMeta.Kind = AlertmanagersKind
|
||||
a.TypeMeta.APIVersion = Group + "/" + Version
|
||||
b, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r unstructured.Unstructured
|
||||
if err := json.Unmarshal(b, &r.Object); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Value-type timestamp fields like ObjectMeta.CreationTimestamp with a zero
|
||||
// value are marshalled as "null" in JSON (rather than omitted) and then
|
||||
// unmarshalled into Unstructured with the key intact and a null value (rather
|
||||
// than being omitted); the net effect is the resulting structs can't be used
|
||||
// to issue a POST because creationTimestamp=null is sent to the server and
|
||||
// fails validation. For example, passing an Alertmanager with a
|
||||
// volumeClaimTemplate can result in an invalid object. This hack simply
|
||||
// removes such timestamp fields manually.
|
||||
//
|
||||
// TODO: reevaluate the use of Unstructured directly here in the context of
|
||||
// the latest dynamic client capabilities; this manual conversion may not be
|
||||
// necessary anymore.
|
||||
unstructured.RemoveNestedField(r.Object, "metadata", "creationTimestamp")
|
||||
unstructured.RemoveNestedField(r.Object, "spec", "storage", "volumeClaimTemplate", "metadata", "creationTimestamp")
|
||||
unstructured.RemoveNestedField(r.Object, "spec", "podMetadata", "creationTimestamp")
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
type alertmanagerDecoder struct {
|
||||
dec *json.Decoder
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (d *alertmanagerDecoder) Close() {
|
||||
d.close()
|
||||
}
|
||||
|
||||
func (d *alertmanagerDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
|
||||
var e struct {
|
||||
Type watch.EventType
|
||||
Object Alertmanager
|
||||
}
|
||||
if err := d.dec.Decode(&e); err != nil {
|
||||
return watch.Error, nil, err
|
||||
}
|
||||
return e.Type, &e.Object, nil
|
||||
}
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/client.go (generated, vendored, normal file, 161 lines)
@ -0,0 +1,161 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
dynamic "k8s.io/client-go/deprecated-dynamic"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
Group = "monitoring.coreos.com"
|
||||
PrometheusKindKey = "prometheus"
|
||||
AlertManagerKindKey = "alertmanager"
|
||||
ServiceMonitorKindKey = "servicemonitor"
|
||||
PrometheusRuleKindKey = "prometheusrule"
|
||||
)
|
||||
|
||||
type CrdKind struct {
|
||||
Kind string
|
||||
Plural string
|
||||
SpecName string
|
||||
}
|
||||
|
||||
type CrdKinds struct {
|
||||
KindsString string
|
||||
Prometheus CrdKind
|
||||
Alertmanager CrdKind
|
||||
ServiceMonitor CrdKind
|
||||
PrometheusRule CrdKind
|
||||
}
|
||||
|
||||
var DefaultCrdKinds = CrdKinds{
|
||||
KindsString: "",
|
||||
Prometheus: CrdKind{Plural: PrometheusName, Kind: PrometheusesKind, SpecName: "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.Prometheus"},
|
||||
ServiceMonitor: CrdKind{Plural: ServiceMonitorName, Kind: ServiceMonitorsKind, SpecName: "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.ServiceMonitor"},
|
||||
Alertmanager: CrdKind{Plural: AlertmanagerName, Kind: AlertmanagersKind, SpecName: "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.Alertmanager"},
|
||||
PrometheusRule: CrdKind{Plural: PrometheusRuleName, Kind: PrometheusRuleKind, SpecName: "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.PrometheusRule"},
|
||||
}
|
||||
|
||||
// Implement the flag.Value interface
|
||||
func (crdkinds *CrdKinds) String() string {
|
||||
return crdkinds.KindsString
|
||||
}
|
||||
|
||||
// Set implements the flag.Value interface.
|
||||
func (crdkinds *CrdKinds) Set(value string) error {
|
||||
*crdkinds = DefaultCrdKinds
|
||||
if value == "" {
|
||||
value = fmt.Sprintf("%s=%s:%s,%s=%s:%s,%s=%s:%s,%s=%s:%s",
|
||||
PrometheusKindKey, PrometheusesKind, PrometheusName,
|
||||
AlertManagerKindKey, AlertmanagersKind, AlertmanagerName,
|
||||
ServiceMonitorKindKey, ServiceMonitorsKind, ServiceMonitorName,
|
||||
PrometheusRuleKindKey, PrometheusRuleKind, PrometheusRuleName,
|
||||
)
|
||||
}
|
||||
splited := strings.Split(value, ",")
|
||||
for _, pair := range splited {
|
||||
sp := strings.Split(pair, "=")
|
||||
kind := strings.Split(sp[1], ":")
|
||||
crdKind := CrdKind{Plural: kind[1], Kind: kind[0]}
|
||||
switch kindKey := sp[0]; kindKey {
|
||||
case PrometheusKindKey:
|
||||
(*crdkinds).Prometheus = crdKind
|
||||
case ServiceMonitorKindKey:
|
||||
(*crdkinds).ServiceMonitor = crdKind
|
||||
case AlertManagerKindKey:
|
||||
(*crdkinds).Alertmanager = crdKind
|
||||
case PrometheusRuleKindKey:
|
||||
(*crdkinds).PrometheusRule = crdKind
|
||||
default:
|
||||
fmt.Printf("Warning: unknown kind: %s... ignoring", kindKey)
|
||||
}
|
||||
|
||||
}
|
||||
(*crdkinds).KindsString = value
|
||||
return nil
|
||||
}
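// Usage sketch (illustrative, not part of this file): because CrdKinds
// implements String and Set, it satisfies flag.Value and can be bound directly
// to a command-line flag. The flag name below is an assumption, not the
// operator's actual flag:
//
//	kinds := DefaultCrdKinds
//	flag.Var(&kinds, "crd-kinds", "override CRD kinds, e.g. prometheus=Prometheus:prometheuses")
//	flag.Parse()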
|
||||
|
||||
var Version = "v1"
|
||||
|
||||
type MonitoringV1Interface interface {
|
||||
RESTClient() rest.Interface
|
||||
PrometheusesGetter
|
||||
AlertmanagersGetter
|
||||
ServiceMonitorsGetter
|
||||
PrometheusRulesGetter
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=false
|
||||
type MonitoringV1Client struct {
|
||||
restClient rest.Interface
|
||||
dynamicClient *dynamic.Client
|
||||
crdKinds *CrdKinds
|
||||
}
|
||||
|
||||
func (c *MonitoringV1Client) Prometheuses(namespace string) PrometheusInterface {
|
||||
return newPrometheuses(c.restClient, c.dynamicClient, c.crdKinds.Prometheus, namespace)
|
||||
}
|
||||
|
||||
func (c *MonitoringV1Client) Alertmanagers(namespace string) AlertmanagerInterface {
|
||||
return newAlertmanagers(c.restClient, c.dynamicClient, c.crdKinds.Alertmanager, namespace)
|
||||
}
|
||||
|
||||
func (c *MonitoringV1Client) ServiceMonitors(namespace string) ServiceMonitorInterface {
|
||||
return newServiceMonitors(c.restClient, c.dynamicClient, c.crdKinds.ServiceMonitor, namespace)
|
||||
}
|
||||
|
||||
func (c *MonitoringV1Client) PrometheusRules(namespace string) PrometheusRuleInterface {
|
||||
return newPrometheusRules(c.restClient, c.dynamicClient, c.crdKinds.PrometheusRule, namespace)
|
||||
}
|
||||
|
||||
func (c *MonitoringV1Client) RESTClient() rest.Interface {
|
||||
return c.restClient
|
||||
}
|
||||
|
||||
func NewForConfig(crdKinds *CrdKinds, apiGroup string, c *rest.Config) (*MonitoringV1Client, error) {
|
||||
config := *c
|
||||
SetConfigDefaults(apiGroup, &config)
|
||||
client, err := rest.RESTClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dynamicClient, err := dynamic.NewClient(&config, schema.GroupVersion{
|
||||
Group: apiGroup,
|
||||
Version: Version,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &MonitoringV1Client{client, dynamicClient, crdKinds}, nil
|
||||
}
|
||||
|
||||
func SetConfigDefaults(apiGroup string, config *rest.Config) {
|
||||
config.GroupVersion = &schema.GroupVersion{
|
||||
Group: apiGroup,
|
||||
Version: Version,
|
||||
}
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
|
||||
return
|
||||
}
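// Usage sketch (illustrative, not part of this file): a consumer can build the
// client from an in-cluster rest.Config and list Alertmanagers in a namespace;
// the namespace below is a placeholder and error handling is trimmed to panics.
//
//	cfg, err := rest.InClusterConfig()
//	if err != nil {
//		panic(err)
//	}
//	client, err := NewForConfig(&DefaultCrdKinds, Group, cfg)
//	if err != nil {
//		panic(err)
//	}
//	// List returns a runtime.Object holding an AlertmanagerList.
//	obj, err := client.Alertmanagers("default").List(metav1.ListOptions{})
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("%T\n", obj)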
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/doc.go (generated, vendored, normal file, 17 lines)
@ -0,0 +1,17 @@
|
||||
// Copyright 2017 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package v1
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/openapi_generated.go (generated, vendored, normal file, 13759 lines)
File diff suppressed because it is too large
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/prometheus.go (generated, vendored, normal file, 215 lines)
@ -0,0 +1,215 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
dynamic "k8s.io/client-go/deprecated-dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
PrometheusesKind = "Prometheus"
|
||||
PrometheusName = "prometheuses"
|
||||
)
|
||||
|
||||
type PrometheusesGetter interface {
|
||||
Prometheuses(namespace string) PrometheusInterface
|
||||
}
|
||||
|
||||
var _ PrometheusInterface = &prometheuses{}
|
||||
|
||||
type PrometheusInterface interface {
|
||||
Create(*Prometheus) (*Prometheus, error)
|
||||
Get(name string, opts metav1.GetOptions) (*Prometheus, error)
|
||||
Update(*Prometheus) (*Prometheus, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
List(opts metav1.ListOptions) (runtime.Object, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error
|
||||
}
|
||||
|
||||
type prometheuses struct {
|
||||
restClient rest.Interface
|
||||
client dynamic.ResourceInterface
|
||||
crdKind CrdKind
|
||||
ns string
|
||||
}
|
||||
|
||||
func newPrometheuses(r rest.Interface, c *dynamic.Client, crdKind CrdKind, namespace string) *prometheuses {
|
||||
return &prometheuses{
|
||||
restClient: r,
|
||||
client: c.Resource(
|
||||
&metav1.APIResource{
|
||||
Kind: crdKind.Kind,
|
||||
Name: crdKind.Plural,
|
||||
Namespaced: true,
|
||||
},
|
||||
namespace,
|
||||
),
|
||||
crdKind: crdKind,
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *prometheuses) Create(o *Prometheus) (*Prometheus, error) {
|
||||
up, err := UnstructuredFromPrometheus(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
up, err = p.client.Create(up)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PrometheusFromUnstructured(up)
|
||||
}
|
||||
|
||||
func (p *prometheuses) Get(name string, opts metav1.GetOptions) (*Prometheus, error) {
|
||||
obj, err := p.client.Get(name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return PrometheusFromUnstructured(obj)
|
||||
}
|
||||
|
||||
func (p *prometheuses) Update(o *Prometheus) (*Prometheus, error) {
|
||||
up, err := UnstructuredFromPrometheus(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
curp, err := p.Get(o.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get current version for update")
|
||||
}
|
||||
up.SetResourceVersion(curp.ObjectMeta.ResourceVersion)
|
||||
|
||||
up, err = p.client.Update(up)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PrometheusFromUnstructured(up)
|
||||
}
|
||||
|
||||
func (p *prometheuses) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return p.client.Delete(name, options)
|
||||
}
|
||||
|
||||
func (p *prometheuses) List(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
req := p.restClient.Get().
|
||||
Namespace(p.ns).
|
||||
Resource(p.crdKind.Plural)
|
||||
|
||||
b, err := req.DoRaw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var prom PrometheusList
|
||||
return &prom, json.Unmarshal(b, &prom)
|
||||
}
|
||||
|
||||
func (p *prometheuses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
r, err := p.restClient.Get().
|
||||
Prefix("watch").
|
||||
Namespace(p.ns).
|
||||
Resource(p.crdKind.Plural).
|
||||
Stream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return watch.NewStreamWatcher(&prometheusDecoder{
|
||||
dec: json.NewDecoder(r),
|
||||
close: r.Close,
|
||||
}), nil
|
||||
}
|
||||
|
||||
func (p *prometheuses) DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error {
|
||||
return p.client.DeleteCollection(dopts, lopts)
|
||||
}
|
||||
|
||||
// PrometheusFromUnstructured unmarshals a Prometheus object from dynamic client's unstructured
|
||||
func PrometheusFromUnstructured(r *unstructured.Unstructured) (*Prometheus, error) {
|
||||
b, err := json.Marshal(r.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var p Prometheus
|
||||
if err := json.Unmarshal(b, &p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.TypeMeta.Kind = PrometheusesKind
|
||||
p.TypeMeta.APIVersion = Group + "/" + Version
|
||||
return &p, nil
|
||||
}
|
||||
|
||||
// UnstructuredFromPrometheus marshals a Prometheus object into dynamic client's unstructured
|
||||
func UnstructuredFromPrometheus(p *Prometheus) (*unstructured.Unstructured, error) {
|
||||
p.TypeMeta.Kind = PrometheusesKind
|
||||
p.TypeMeta.APIVersion = Group + "/" + Version
|
||||
b, err := json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r unstructured.Unstructured
|
||||
if err := json.Unmarshal(b, &r.Object); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Value-type timestamp fields like ObjectMeta.CreationTimestamp with a zero
|
||||
// value are marshalled as "null" in JSON (rather than omitted) and then
|
||||
// unmarshalled into Unstructured with the key intact and a null value (rather
|
||||
// than being omitted); the net effect is the resulting structs can't be used
|
||||
// to issue a POST because creationTimestamp=null is sent to the server and
|
||||
// fails validation. For example, passing a Prometheus with a
|
||||
// volumeClaimTemplate can result in an invalid object. This hack simply
|
||||
// removes such timestamp fields manually.
|
||||
//
|
||||
// TODO: reevaluate the use of Unstructured directly here in the context of
|
||||
// the latest dynamic client capabilities; this manual conversion may not be
|
||||
// necessary anymore.
|
||||
unstructured.RemoveNestedField(r.Object, "metadata", "creationTimestamp")
|
||||
unstructured.RemoveNestedField(r.Object, "spec", "storage", "volumeClaimTemplate", "metadata", "creationTimestamp")
|
||||
unstructured.RemoveNestedField(r.Object, "spec", "podMetadata", "creationTimestamp")
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
type prometheusDecoder struct {
|
||||
dec *json.Decoder
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (d *prometheusDecoder) Close() {
|
||||
d.close()
|
||||
}
|
||||
|
||||
func (d *prometheusDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
|
||||
var e struct {
|
||||
Type watch.EventType
|
||||
Object Prometheus
|
||||
}
|
||||
if err := d.dec.Decode(&e); err != nil {
|
||||
return watch.Error, nil, err
|
||||
}
|
||||
return e.Type, &e.Object, nil
|
||||
}
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/prometheusrule.go (generated, vendored, normal file, 200 lines)
@ -0,0 +1,200 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
dynamic "k8s.io/client-go/deprecated-dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
PrometheusRuleKind = "PrometheusRule"
|
||||
PrometheusRuleName = "prometheusrules"
|
||||
)
|
||||
|
||||
type PrometheusRulesGetter interface {
|
||||
PrometheusRules(namespace string) PrometheusRuleInterface
|
||||
}
|
||||
|
||||
var _ PrometheusRuleInterface = &prometheusrules{}
|
||||
|
||||
type PrometheusRuleInterface interface {
|
||||
Create(*PrometheusRule) (*PrometheusRule, error)
|
||||
Get(name string, opts metav1.GetOptions) (*PrometheusRule, error)
|
||||
Update(*PrometheusRule) (*PrometheusRule, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
List(opts metav1.ListOptions) (runtime.Object, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error
|
||||
}
|
||||
|
||||
type prometheusrules struct {
|
||||
restClient rest.Interface
|
||||
client dynamic.ResourceInterface
|
||||
crdKind CrdKind
|
||||
ns string
|
||||
}
|
||||
|
||||
func newPrometheusRules(r rest.Interface, c *dynamic.Client, crdKind CrdKind, namespace string) *prometheusrules {
|
||||
return &prometheusrules{
|
||||
restClient: r,
|
||||
client: c.Resource(
|
||||
&metav1.APIResource{
|
||||
Kind: crdKind.Kind,
|
||||
Name: crdKind.Plural,
|
||||
Namespaced: true,
|
||||
},
|
||||
namespace,
|
||||
),
|
||||
crdKind: crdKind,
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *prometheusrules) Create(o *PrometheusRule) (*PrometheusRule, error) {
|
||||
us, err := UnstructuredFromPrometheusRule(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us, err = s.client.Create(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PrometheusRuleFromUnstructured(us)
|
||||
}
|
||||
|
||||
func (s *prometheusrules) Get(name string, opts metav1.GetOptions) (*PrometheusRule, error) {
|
||||
obj, err := s.client.Get(name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return PrometheusRuleFromUnstructured(obj)
|
||||
}
|
||||
|
||||
func (s *prometheusrules) Update(o *PrometheusRule) (*PrometheusRule, error) {
|
||||
us, err := UnstructuredFromPrometheusRule(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
curs, err := s.Get(o.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get current version for update")
|
||||
}
|
||||
us.SetResourceVersion(curs.ObjectMeta.ResourceVersion)
|
||||
|
||||
us, err = s.client.Update(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PrometheusRuleFromUnstructured(us)
|
||||
}
|
||||
|
||||
func (s *prometheusrules) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return s.client.Delete(name, options)
|
||||
}
|
||||
|
||||
func (s *prometheusrules) List(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
req := s.restClient.Get().
|
||||
Namespace(s.ns).
|
||||
Resource(s.crdKind.Plural)
|
||||
|
||||
b, err := req.DoRaw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var sm PrometheusRuleList
|
||||
return &sm, json.Unmarshal(b, &sm)
|
||||
}
|
||||
|
||||
func (s *prometheusrules) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
r, err := s.restClient.Get().
|
||||
Prefix("watch").
|
||||
Namespace(s.ns).
|
||||
Resource(s.crdKind.Plural).
|
||||
Stream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return watch.NewStreamWatcher(&prometheusRuleDecoder{
|
||||
dec: json.NewDecoder(r),
|
||||
close: r.Close,
|
||||
}), nil
|
||||
}
|
||||
|
||||
func (s *prometheusrules) DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error {
|
||||
return s.client.DeleteCollection(dopts, lopts)
|
||||
}
|
||||
|
||||
// PrometheusRuleFromUnstructured unmarshals a PrometheusRule object from dynamic client's unstructured
|
||||
func PrometheusRuleFromUnstructured(r *unstructured.Unstructured) (*PrometheusRule, error) {
|
||||
b, err := json.Marshal(r.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var s PrometheusRule
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.TypeMeta.Kind = PrometheusRuleKind
|
||||
s.TypeMeta.APIVersion = Group + "/" + Version
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
// UnstructuredFromPrometheusRule marshals a PrometheusRule object into dynamic client's unstructured
|
||||
func UnstructuredFromPrometheusRule(s *PrometheusRule) (*unstructured.Unstructured, error) {
|
||||
s.TypeMeta.Kind = PrometheusRuleKind
|
||||
s.TypeMeta.APIVersion = Group + "/" + Version
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r unstructured.Unstructured
|
||||
if err := json.Unmarshal(b, &r.Object); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
type prometheusRuleDecoder struct {
|
||||
dec *json.Decoder
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (d *prometheusRuleDecoder) Close() {
|
||||
d.close()
|
||||
}
|
||||
|
||||
func (d *prometheusRuleDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
|
||||
var e struct {
|
||||
Type watch.EventType
|
||||
Object PrometheusRule
|
||||
}
|
||||
if err := d.dec.Decode(&e); err != nil {
|
||||
return watch.Error, nil, err
|
||||
}
|
||||
return e.Type, &e.Object, nil
|
||||
}
|
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/servicemonitor.go (generated, vendored, normal file, 200 lines)
@ -0,0 +1,200 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
dynamic "k8s.io/client-go/deprecated-dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
ServiceMonitorsKind = "ServiceMonitor"
|
||||
ServiceMonitorName = "servicemonitors"
|
||||
)
|
||||
|
||||
type ServiceMonitorsGetter interface {
|
||||
ServiceMonitors(namespace string) ServiceMonitorInterface
|
||||
}
|
||||
|
||||
var _ ServiceMonitorInterface = &servicemonitors{}
|
||||
|
||||
type ServiceMonitorInterface interface {
|
||||
Create(*ServiceMonitor) (*ServiceMonitor, error)
|
||||
Get(name string, opts metav1.GetOptions) (*ServiceMonitor, error)
|
||||
Update(*ServiceMonitor) (*ServiceMonitor, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
List(opts metav1.ListOptions) (runtime.Object, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error
|
||||
}
|
||||
|
||||
type servicemonitors struct {
|
||||
restClient rest.Interface
|
||||
client dynamic.ResourceInterface
|
||||
crdKind CrdKind
|
||||
ns string
|
||||
}
|
||||
|
||||
func newServiceMonitors(r rest.Interface, c *dynamic.Client, crdKind CrdKind, namespace string) *servicemonitors {
|
||||
return &servicemonitors{
|
||||
restClient: r,
|
||||
client: c.Resource(
|
||||
&metav1.APIResource{
|
||||
Kind: crdKind.Kind,
|
||||
Name: crdKind.Plural,
|
||||
Namespaced: true,
|
||||
},
|
||||
namespace,
|
||||
),
|
||||
crdKind: crdKind,
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *servicemonitors) Create(o *ServiceMonitor) (*ServiceMonitor, error) {
|
||||
us, err := UnstructuredFromServiceMonitor(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us, err = s.client.Create(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ServiceMonitorFromUnstructured(us)
|
||||
}
|
||||
|
||||
func (s *servicemonitors) Get(name string, opts metav1.GetOptions) (*ServiceMonitor, error) {
|
||||
obj, err := s.client.Get(name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ServiceMonitorFromUnstructured(obj)
|
||||
}
|
||||
|
||||
func (s *servicemonitors) Update(o *ServiceMonitor) (*ServiceMonitor, error) {
|
||||
us, err := UnstructuredFromServiceMonitor(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
curs, err := s.Get(o.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get current version for update")
|
||||
}
|
||||
us.SetResourceVersion(curs.ObjectMeta.ResourceVersion)
|
||||
|
||||
us, err = s.client.Update(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ServiceMonitorFromUnstructured(us)
|
||||
}
|
||||
|
||||
func (s *servicemonitors) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return s.client.Delete(name, options)
|
||||
}
|
||||
|
||||
func (s *servicemonitors) List(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
req := s.restClient.Get().
|
||||
Namespace(s.ns).
|
||||
Resource(s.crdKind.Plural)
|
||||
|
||||
b, err := req.DoRaw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var sm ServiceMonitorList
|
||||
return &sm, json.Unmarshal(b, &sm)
|
||||
}
|
||||
|
||||
func (s *servicemonitors) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
r, err := s.restClient.Get().
|
||||
Prefix("watch").
|
||||
Namespace(s.ns).
|
||||
Resource(s.crdKind.Plural).
|
||||
Stream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return watch.NewStreamWatcher(&serviceMonitorDecoder{
|
||||
dec: json.NewDecoder(r),
|
||||
close: r.Close,
|
||||
}), nil
|
||||
}
|
||||
|
||||
func (s *servicemonitors) DeleteCollection(dopts *metav1.DeleteOptions, lopts metav1.ListOptions) error {
|
||||
return s.client.DeleteCollection(dopts, lopts)
|
||||
}
|
||||
|
||||
// ServiceMonitorFromUnstructured unmarshals a ServiceMonitor object from dynamic client's unstructured
|
||||
func ServiceMonitorFromUnstructured(r *unstructured.Unstructured) (*ServiceMonitor, error) {
|
||||
b, err := json.Marshal(r.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var s ServiceMonitor
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.TypeMeta.Kind = ServiceMonitorsKind
|
||||
s.TypeMeta.APIVersion = Group + "/" + Version
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
// UnstructuredFromServiceMonitor marshals a ServiceMonitor object into dynamic client's unstructured
|
||||
func UnstructuredFromServiceMonitor(s *ServiceMonitor) (*unstructured.Unstructured, error) {
|
||||
s.TypeMeta.Kind = ServiceMonitorsKind
|
||||
s.TypeMeta.APIVersion = Group + "/" + Version
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r unstructured.Unstructured
|
||||
if err := json.Unmarshal(b, &r.Object); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
type serviceMonitorDecoder struct {
|
||||
dec *json.Decoder
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (d *serviceMonitorDecoder) Close() {
|
||||
d.close()
|
||||
}
|
||||
|
||||
func (d *serviceMonitorDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
|
||||
var e struct {
|
||||
Type watch.EventType
|
||||
Object ServiceMonitor
|
||||
}
|
||||
if err := d.dec.Decode(&e); err != nil {
|
||||
return watch.Error, nil, err
|
||||
}
|
||||
return e.Type, &e.Object, nil
|
||||
}
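// A minimal usage sketch of the interface above (illustrative only, not part
// of the generated client). It assumes the caller already holds a
// ServiceMonitorInterface value, e.g. obtained from this package's monitoring
// client constructor, which lives in another file of this vendor drop.
func exampleServiceMonitorUsage(smClient ServiceMonitorInterface) error {
	sm := &ServiceMonitor{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: ServiceMonitorSpec{
			Selector:  metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
			Endpoints: []Endpoint{{Port: "metrics", Interval: "30s"}},
		},
	}
	created, err := smClient.Create(sm)
	if err != nil {
		return err
	}
	// Get round-trips through the dynamic client and back into the typed struct.
	_, err = smClient.Get(created.Name, metav1.GetOptions{})
	return err
}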
|
785
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/types.go
generated
vendored
Normal file
@ -0,0 +1,785 @@
|
||||
// Copyright 2016 The prometheus-operator Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// Prometheus defines a Prometheus deployment.
|
||||
// +k8s:openapi-gen=true
|
||||
type Prometheus struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object’s metadata. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
// +k8s:openapi-gen=false
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the desired behavior of the Prometheus cluster. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
Spec PrometheusSpec `json:"spec"`
|
||||
// Most recent observed status of the Prometheus cluster. Read-only. Not
|
||||
// included when requesting from the apiserver, only from the Prometheus
|
||||
// Operator API itself. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
Status *PrometheusStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// PrometheusList is a list of Prometheuses.
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
// List of Prometheuses
|
||||
Items []*Prometheus `json:"items"`
|
||||
}
|
||||
|
||||
// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusSpec struct {
|
||||
// Standard object’s metadata. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
// Metadata Labels and Annotations get propagated to the Prometheus pods.
|
||||
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
|
||||
// ServiceMonitors to be selected for target discovery.
|
||||
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
|
||||
// Namespaces to be selected for ServiceMonitor discovery. If nil, only
|
||||
// check own namespace.
|
||||
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
|
||||
// Version of Prometheus to be deployed.
|
||||
Version string `json:"version,omitempty"`
|
||||
// Tag of Prometheus container image to be deployed. Defaults to the value of `version`.
|
||||
// Version is ignored if Tag is set.
|
||||
Tag string `json:"tag,omitempty"`
|
||||
// SHA of Prometheus container image to be deployed. Defaults to the value of `version`.
|
||||
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
|
||||
// Version and Tag are ignored if SHA is set.
|
||||
SHA string `json:"sha,omitempty"`
|
||||
// When a Prometheus deployment is paused, no actions except for deletion
|
||||
// will be performed on the underlying objects.
|
||||
Paused bool `json:"paused,omitempty"`
|
||||
// Base image to use for a Prometheus deployment.
|
||||
BaseImage string `json:"baseImage,omitempty"`
|
||||
// An optional list of references to secrets in the same namespace
|
||||
// to use for pulling prometheus and alertmanager images from registries
|
||||
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
|
||||
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
||||
// Number of instances to deploy for a Prometheus deployment.
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
// Time duration Prometheus shall retain data for. Default is '24h',
|
||||
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
|
||||
Retention string `json:"retention,omitempty"`
|
||||
// Log level for Prometheus to be configured with.
|
||||
LogLevel string `json:"logLevel,omitempty"`
|
||||
// Interval between consecutive scrapes.
|
||||
ScrapeInterval string `json:"scrapeInterval,omitempty"`
|
||||
// Interval between consecutive evaluations.
|
||||
EvaluationInterval string `json:"evaluationInterval,omitempty"`
|
||||
// The labels to add to any time series or alerts when communicating with
|
||||
// external systems (federation, remote storage, Alertmanager).
|
||||
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
|
||||
// The external URL the Prometheus instances will be available under. This is
|
||||
// needed to generate correct URLs, and is required if Prometheus is not
|
||||
// served from the root of a DNS name.
|
||||
ExternalURL string `json:"externalUrl,omitempty"`
|
||||
// The route prefix Prometheus registers HTTP handlers for. This is useful,
|
||||
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
|
||||
// and the actual ExternalURL is still true, but the server serves requests
|
||||
// under a different route prefix. For example for use with `kubectl proxy`.
|
||||
RoutePrefix string `json:"routePrefix,omitempty"`
|
||||
// Storage spec to specify how storage shall be used.
|
||||
Storage *StorageSpec `json:"storage,omitempty"`
|
||||
// A selector to select which PrometheusRules to mount for loading alerting
|
||||
// rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus
|
||||
// Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom
|
||||
// resources selected by RuleSelector. Make sure it does not match any config
|
||||
// maps that you do not want to be migrated.
|
||||
RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"`
|
||||
// Namespaces to be selected for PrometheusRules discovery. If unspecified, only
|
||||
// the namespace of the Prometheus object itself is used.
|
||||
RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
|
||||
// Define details regarding alerting.
|
||||
Alerting *AlertingSpec `json:"alerting,omitempty"`
|
||||
// Define resources requests and limits for single Pods.
|
||||
Resources v1.ResourceRequirements `json:"resources,omitempty"`
|
||||
// Define which Nodes the Pods are scheduled on.
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
// ServiceAccountName is the name of the ServiceAccount to use to run the
|
||||
// Prometheus Pods.
|
||||
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||
// Secrets is a list of Secrets in the same namespace as the Prometheus
|
||||
// object, which shall be mounted into the Prometheus Pods.
|
||||
// The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
|
||||
Secrets []string `json:"secrets,omitempty"`
|
||||
// ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus
|
||||
// object, which shall be mounted into the Prometheus Pods.
|
||||
// The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
|
||||
ConfigMaps []string `json:"configMaps,omitempty"`
|
||||
// If specified, the pod's scheduling constraints.
|
||||
Affinity *v1.Affinity `json:"affinity,omitempty"`
|
||||
// If specified, the pod's tolerations.
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
// If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
|
||||
RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
|
||||
// If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
|
||||
RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"`
|
||||
// SecurityContext holds pod-level security attributes and common container settings.
|
||||
// This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and
|
||||
// default PodSecurityContext for other versions.
|
||||
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||
// ListenLocal makes the Prometheus server listen on loopback, so that it
|
||||
// does not bind against the Pod IP.
|
||||
ListenLocal bool `json:"listenLocal,omitempty"`
|
||||
// Containers allows injecting additional containers. This is meant to
|
||||
// allow adding an authentication proxy to a Prometheus pod.
|
||||
Containers []v1.Container `json:"containers,omitempty"`
|
||||
// AdditionalScrapeConfigs allows specifying a key of a Secret containing
|
||||
// additional Prometheus scrape configurations. Scrape configurations
|
||||
// specified are appended to the configurations generated by the Prometheus
|
||||
// Operator. Job configurations specified must have the form as specified
|
||||
// in the official Prometheus documentation:
|
||||
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>.
|
||||
// As scrape configs are appended, the user is responsible for making sure they
|
||||
// are valid. Note that using this feature may expose the possibility to
|
||||
// break upgrades of Prometheus. It is advised to review Prometheus release
|
||||
// notes to ensure that no incompatible scrape configs are going to break
|
||||
// Prometheus after the upgrade.
|
||||
AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
|
||||
// AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing
|
||||
// additional Prometheus alert relabel configurations. Alert relabel configurations
|
||||
// specified are appended to the configurations generated by the Prometheus
|
||||
// Operator. Alert relabel configurations specified must have the form as specified
|
||||
// in the official Prometheus documentation:
|
||||
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
|
||||
// As alert relabel configs are appended, the user is responsible for making sure they
|
||||
// are valid. Note that using this feature may expose the possibility to
|
||||
// break upgrades of Prometheus. It is advised to review Prometheus release
|
||||
// notes to ensure that no incompatible alert relabel configs are going to break
|
||||
// Prometheus after the upgrade.
|
||||
AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"`
|
||||
// AdditionalAlertManagerConfigs allows specifying a key of a Secret containing
|
||||
// additional Prometheus AlertManager configurations. AlertManager configurations
|
||||
// specified are appended to the configurations generated by the Prometheus
|
||||
// Operator. Job configurations specified must have the form as specified
|
||||
// in the official Prometheus documentation:
|
||||
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
|
||||
// As AlertManager configs are appended, the user is responsible for making sure they
|
||||
// are valid. Note that using this feature may expose the possibility to
|
||||
// break upgrades of Prometheus. It is advised to review Prometheus release
|
||||
// notes to ensure that no incompatible AlertManager configs are going to break
|
||||
// Prometheus after the upgrade.
|
||||
AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"`
|
||||
// APIServerConfig allows specifying a host and auth methods to access apiserver.
|
||||
// If left empty, Prometheus is assumed to run inside of the cluster
|
||||
// and will discover API servers automatically and use the pod's CA certificate
|
||||
// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
|
||||
APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
|
||||
// Thanos configuration allows configuring various aspects of a Prometheus
|
||||
// server in a Thanos environment.
|
||||
//
|
||||
// This section is experimental, it may change significantly without
|
||||
// deprecation notice in any release.
|
||||
//
|
||||
// This is experimental and may change significantly without backward
|
||||
// compatibility in any release.
|
||||
Thanos *ThanosSpec `json:"thanos,omitempty"`
|
||||
// Priority class assigned to the Pods
|
||||
PriorityClassName string `json:"priorityClassName,omitempty"`
|
||||
}
|
||||
|
||||
// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not
|
||||
// included when requesting from the apiserver, only from the Prometheus
|
||||
// Operator API itself. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusStatus struct {
|
||||
// Represents whether any actions on the underlying managed objects are
|
||||
// being performed. Only delete actions will be performed.
|
||||
Paused bool `json:"paused"`
|
||||
// Total number of non-terminated pods targeted by this Prometheus deployment
|
||||
// (their labels match the selector).
|
||||
Replicas int32 `json:"replicas"`
|
||||
// Total number of non-terminated pods targeted by this Prometheus deployment
|
||||
// that have the desired version spec.
|
||||
UpdatedReplicas int32 `json:"updatedReplicas"`
|
||||
// Total number of available pods (ready for at least minReadySeconds)
|
||||
// targeted by this Prometheus deployment.
|
||||
AvailableReplicas int32 `json:"availableReplicas"`
|
||||
// Total number of unavailable pods targeted by this Prometheus deployment.
|
||||
UnavailableReplicas int32 `json:"unavailableReplicas"`
|
||||
}
|
||||
|
||||
// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
|
||||
// +k8s:openapi-gen=true
|
||||
type AlertingSpec struct {
|
||||
// AlertmanagerEndpoints Prometheus should fire alerts against.
|
||||
Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
|
||||
}
|
||||
|
||||
// StorageSpec defines the configured storage for a group of Prometheus servers.
|
||||
// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
|
||||
// +k8s:openapi-gen=true
|
||||
type StorageSpec struct {
|
||||
// Name of the StorageClass to use when requesting storage provisioning. More
|
||||
// info: https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses
|
||||
// (DEPRECATED - instead use `volumeClaimTemplate.spec.storageClassName`)
|
||||
Class string `json:"class,omitempty"`
|
||||
// EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More
|
||||
// info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
|
||||
EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
|
||||
// A label query over volumes to consider for binding.
|
||||
// (DEPRECATED - instead use `volumeClaimTemplate.spec.selector`)
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty"`
|
||||
// Resources represents the minimum resources the volume should have. More
|
||||
// info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources
|
||||
// (DEPRECATED - instead use `volumeClaimTemplate.spec.resources`)
|
||||
Resources v1.ResourceRequirements `json:"resources,omitempty"`
|
||||
// A PVC spec to be used by the Prometheus StatefulSets.
|
||||
VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
|
||||
}
|
||||
|
||||
// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.
|
||||
// +k8s:openapi-gen=true
|
||||
type ThanosSpec struct {
|
||||
// Peers is a DNS name for Thanos to discover peers through.
|
||||
Peers *string `json:"peers,omitempty"`
|
||||
// Version describes the version of Thanos to use.
|
||||
Version *string `json:"version,omitempty"`
|
||||
// Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`.
|
||||
// Version is ignored if Tag is set.
|
||||
Tag *string `json:"tag,omitempty"`
|
||||
// SHA of Thanos container image to be deployed. Defaults to the value of `version`.
|
||||
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
|
||||
// Version and Tag are ignored if SHA is set.
|
||||
SHA *string `json:"sha,omitempty"`
|
||||
// Thanos base image if other than default.
|
||||
BaseImage *string `json:"baseImage,omitempty"`
|
||||
// Resources defines the resource requirements for the Thanos sidecar.
|
||||
// If not provided, no requests/limits will be set
|
||||
Resources v1.ResourceRequirements `json:"resources,omitempty"`
|
||||
// GCS configures use of GCS in Thanos.
|
||||
GCS *ThanosGCSSpec `json:"gcs,omitempty"`
|
||||
// S3 configures use of S3 in Thanos.
|
||||
S3 *ThanosS3Spec `json:"s3,omitempty"`
|
||||
}
|
||||
|
||||
// ThanosGCSSpec defines parameters for use of Google Cloud Storage (GCS) with
|
||||
// Thanos.
|
||||
// +k8s:openapi-gen=true
|
||||
type ThanosGCSSpec struct {
|
||||
// Google Cloud Storage bucket name for stored blocks. If empty it won't
|
||||
// store any block inside Google Cloud Storage.
|
||||
Bucket *string `json:"bucket,omitempty"`
|
||||
// Secret to access our Bucket.
|
||||
SecretKey *v1.SecretKeySelector `json:"credentials,omitempty"`
|
||||
}
|
||||
|
||||
// ThanosS3Spec defines parameters for use of AWS Simple Storage Service (S3) with
|
||||
// Thanos. (S3 compatible services apply as well)
|
||||
// +k8s:openapi-gen=true
|
||||
type ThanosS3Spec struct {
|
||||
// S3-Compatible API bucket name for stored blocks.
|
||||
Bucket *string `json:"bucket,omitempty"`
|
||||
// S3-Compatible API endpoint for stored blocks.
|
||||
Endpoint *string `json:"endpoint,omitempty"`
|
||||
// AccessKey for an S3-Compatible API.
|
||||
AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"`
|
||||
// SecretKey for an S3-Compatible API.
|
||||
SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"`
|
||||
// Whether to use an insecure connection with an S3-Compatible API.
|
||||
Insecure *bool `json:"insecure,omitempty"`
|
||||
// Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used.
|
||||
SignatureVersion2 *bool `json:"signatureVersion2,omitempty"`
|
||||
// Whether to use Server Side Encryption
|
||||
EncryptSSE *bool `json:"encryptsse,omitempty"`
|
||||
}
|
||||
|
||||
// RemoteWriteSpec defines the remote_write configuration for prometheus.
|
||||
// +k8s:openapi-gen=true
|
||||
type RemoteWriteSpec struct {
|
||||
//The URL of the endpoint to send samples to.
|
||||
URL string `json:"url"`
|
||||
//Timeout for requests to the remote write endpoint.
|
||||
RemoteTimeout string `json:"remoteTimeout,omitempty"`
|
||||
//The list of remote write relabel configurations.
|
||||
WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"`
|
||||
//BasicAuth for the URL.
|
||||
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
|
||||
// Bearer token for remote write.
|
||||
BearerToken string `json:"bearerToken,omitempty"`
|
||||
// File to read bearer token for remote write.
|
||||
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
|
||||
// TLS Config to use for remote write.
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
//Optional ProxyURL
|
||||
ProxyURL string `json:"proxyUrl,omitempty"`
|
||||
// QueueConfig allows tuning of the remote write queue parameters.
|
||||
QueueConfig *QueueConfig `json:"queueConfig,omitempty"`
|
||||
}
|
||||
|
||||
// QueueConfig allows the tuning of remote_write queue_config parameters. This object
|
||||
// is referenced in the RemoteWriteSpec object.
|
||||
// +k8s:openapi-gen=true
|
||||
type QueueConfig struct {
|
||||
// Capacity is the number of samples to buffer per shard before we start dropping them.
|
||||
Capacity int `json:"capacity,omitempty"`
|
||||
// MaxShards is the maximum number of shards, i.e. amount of concurrency.
|
||||
MaxShards int `json:"maxShards,omitempty"`
|
||||
// MaxSamplesPerSend is the maximum number of samples per send.
|
||||
MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"`
|
||||
// BatchSendDeadline is the maximum time a sample will wait in buffer.
|
||||
BatchSendDeadline string `json:"batchSendDeadline,omitempty"`
|
||||
// MaxRetries is the maximum number of times to retry a batch on recoverable errors.
|
||||
MaxRetries int `json:"maxRetries,omitempty"`
|
||||
// MinBackoff is the initial retry delay. Gets doubled for every retry.
|
||||
MinBackoff string `json:"minBackoff,omitempty"`
|
||||
// MaxBackoff is the maximum retry delay.
|
||||
MaxBackoff string `json:"maxBackoff,omitempty"`
|
||||
}
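// An illustrative QueueConfig literal (example values, not defaults) that a
// RemoteWriteSpec above could reference via its QueueConfig field.
var exampleQueueConfig = QueueConfig{
	Capacity:          2500,
	MaxShards:         200,
	MaxSamplesPerSend: 500,
	BatchSendDeadline: "5s",
	MinBackoff:        "30ms",
	MaxBackoff:        "100ms",
}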
|
||||
|
||||
// RemoteReadSpec defines the remote_read configuration for prometheus.
|
||||
// +k8s:openapi-gen=true
|
||||
type RemoteReadSpec struct {
|
||||
//The URL of the endpoint to read samples from.
|
||||
URL string `json:"url"`
|
||||
//An optional list of equality matchers which have to be present
|
||||
// in a selector to query the remote read endpoint.
|
||||
RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"`
|
||||
//Timeout for requests to the remote read endpoint.
|
||||
RemoteTimeout string `json:"remoteTimeout,omitempty"`
|
||||
//Whether reads should be made for queries for time ranges that
|
||||
// the local storage should have complete data for.
|
||||
ReadRecent bool `json:"readRecent,omitempty"`
|
||||
//BasicAuth for the URL.
|
||||
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
|
||||
// Bearer token for remote read.
|
||||
BearerToken string `json:"bearerToken,omitempty"`
|
||||
// File to read bearer token for remote read.
|
||||
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
|
||||
// TLS Config to use for remote read.
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
//Optional ProxyURL
|
||||
ProxyURL string `json:"proxyUrl,omitempty"`
|
||||
}
|
||||
|
||||
// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
|
||||
// It defines `<metric_relabel_configs>`-section of Prometheus configuration.
|
||||
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
|
||||
// +k8s:openapi-gen=true
|
||||
type RelabelConfig struct {
|
||||
//The source labels select values from existing labels. Their content is concatenated
|
||||
//using the configured separator and matched against the configured regular expression
|
||||
//for the replace, keep, and drop actions.
|
||||
SourceLabels []string `json:"sourceLabels,omitempty"`
|
||||
//Separator placed between concatenated source label values. default is ';'.
|
||||
Separator string `json:"separator,omitempty"`
|
||||
//Label to which the resulting value is written in a replace action.
|
||||
//It is mandatory for replace actions. Regex capture groups are available.
|
||||
TargetLabel string `json:"targetLabel,omitempty"`
|
||||
//Regular expression against which the extracted value is matched. Default is '(.*)'.
|
||||
Regex string `json:"regex,omitempty"`
|
||||
// Modulus to take of the hash of the source label values.
|
||||
Modulus uint64 `json:"modulus,omitempty"`
|
||||
//Replacement value against which a regex replace is performed if the
|
||||
//regular expression matches. Regex capture groups are available. Default is '$1'
|
||||
Replacement string `json:"replacement,omitempty"`
|
||||
// Action to perform based on regex matching. Default is 'replace'
|
||||
Action string `json:"action,omitempty"`
|
||||
}
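// An illustrative RelabelConfig (example values) that drops any series whose
// metric name matches a hypothetical debug prefix.
var exampleDropRelabel = RelabelConfig{
	SourceLabels: []string{"__name__"},
	Regex:        "example_debug_.*",
	Action:       "drop",
}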
|
||||
|
||||
// APIServerConfig defines a host and auth methods to access apiserver.
|
||||
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
|
||||
// +k8s:openapi-gen=true
|
||||
type APIServerConfig struct {
|
||||
// Host of apiserver.
|
||||
// A valid string consisting of a hostname or IP followed by an optional port number
|
||||
Host string `json:"host"`
|
||||
// BasicAuth allows an endpoint to authenticate over basic authentication
|
||||
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
|
||||
// Bearer token for accessing apiserver.
|
||||
BearerToken string `json:"bearerToken,omitempty"`
|
||||
// File to read bearer token for accessing apiserver.
|
||||
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
|
||||
// TLS Config to use for accessing apiserver.
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
}
|
||||
|
||||
// AlertmanagerEndpoints defines a selection of a single Endpoints object
|
||||
// containing alertmanager IPs to fire alerts against.
|
||||
// +k8s:openapi-gen=true
|
||||
type AlertmanagerEndpoints struct {
|
||||
// Namespace of Endpoints object.
|
||||
Namespace string `json:"namespace"`
|
||||
// Name of Endpoints object in Namespace.
|
||||
Name string `json:"name"`
|
||||
// Port the Alertmanager API is exposed on.
|
||||
Port intstr.IntOrString `json:"port"`
|
||||
// Scheme to use when firing alerts.
|
||||
Scheme string `json:"scheme,omitempty"`
|
||||
// Prefix for the HTTP path alerts are pushed to.
|
||||
PathPrefix string `json:"pathPrefix,omitempty"`
|
||||
// TLS Config to use for alertmanager connection.
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
// BearerTokenFile to read from filesystem to use when authenticating to
|
||||
// Alertmanager.
|
||||
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceMonitor defines monitoring for a set of services.
|
||||
// +k8s:openapi-gen=true
|
||||
type ServiceMonitor struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object’s metadata. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
// +k8s:openapi-gen=false
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of desired Service selection for target discovery by
|
||||
// Prometheus.
|
||||
Spec ServiceMonitorSpec `json:"spec"`
|
||||
}
|
||||
|
||||
// ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
|
||||
// +k8s:openapi-gen=true
|
||||
type ServiceMonitorSpec struct {
|
||||
// The label to use to retrieve the job name from.
|
||||
JobLabel string `json:"jobLabel,omitempty"`
|
||||
// TargetLabels transfers labels on the Kubernetes Service onto the target.
|
||||
TargetLabels []string `json:"targetLabels,omitempty"`
|
||||
// PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
|
||||
PodTargetLabels []string `json:"podTargetLabels,omitempty"`
|
||||
// A list of endpoints allowed as part of this ServiceMonitor.
|
||||
Endpoints []Endpoint `json:"endpoints"`
|
||||
// Selector to select Endpoints objects.
|
||||
Selector metav1.LabelSelector `json:"selector"`
|
||||
// Selector to select which namespaces the Endpoints objects are discovered from.
|
||||
NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
|
||||
// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
|
||||
SampleLimit uint64 `json:"sampleLimit,omitempty"`
|
||||
}
|
||||
|
||||
// Endpoint defines a scrapeable endpoint serving Prometheus metrics.
|
||||
// +k8s:openapi-gen=true
|
||||
type Endpoint struct {
|
||||
// Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
|
||||
Port string `json:"port,omitempty"`
|
||||
// Name or number of the target port of the endpoint. Mutually exclusive with port.
|
||||
TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
|
||||
// HTTP path to scrape for metrics.
|
||||
Path string `json:"path,omitempty"`
|
||||
// HTTP scheme to use for scraping.
|
||||
Scheme string `json:"scheme,omitempty"`
|
||||
// Optional HTTP URL parameters
|
||||
Params map[string][]string `json:"params,omitempty"`
|
||||
// Interval at which metrics should be scraped
|
||||
Interval string `json:"interval,omitempty"`
|
||||
// Timeout after which the scrape is ended
|
||||
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
|
||||
// TLS configuration to use when scraping the endpoint
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
// File to read bearer token for scraping targets.
|
||||
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
|
||||
// HonorLabels chooses the metric's labels on collisions with target labels.
|
||||
HonorLabels bool `json:"honorLabels,omitempty"`
|
||||
// BasicAuth allows an endpoint to authenticate over basic authentication
|
||||
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
|
||||
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
|
||||
// MetricRelabelConfigs to apply to samples before ingestion.
|
||||
MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
|
||||
// RelabelConfigs to apply to samples before ingestion.
|
||||
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<relabel_config>
|
||||
RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
|
||||
// ProxyURL, e.g. http://proxyserver:2195, directs scrapes to proxy through this endpoint.
|
||||
ProxyURL *string `json:"proxyUrl,omitempty"`
|
||||
}
|
||||
|
||||
// BasicAuth allows an endpoint to authenticate over basic authentication
|
||||
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
|
||||
// +k8s:openapi-gen=true
|
||||
type BasicAuth struct {
|
||||
// The secret that contains the username for authentication
|
||||
Username v1.SecretKeySelector `json:"username,omitempty"`
|
||||
// The secret that contains the password for authentication
|
||||
Password v1.SecretKeySelector `json:"password,omitempty"`
|
||||
}
|
||||
|
||||
// TLSConfig specifies TLS configuration parameters.
|
||||
// +k8s:openapi-gen=true
|
||||
type TLSConfig struct {
|
||||
// The CA cert to use for the targets.
|
||||
CAFile string `json:"caFile,omitempty"`
|
||||
// The client cert file for the targets.
|
||||
CertFile string `json:"certFile,omitempty"`
|
||||
// The client key file for the targets.
|
||||
KeyFile string `json:"keyFile,omitempty"`
|
||||
// Used to verify the hostname for the targets.
|
||||
ServerName string `json:"serverName,omitempty"`
|
||||
// Disable target certificate validation.
|
||||
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceMonitorList is a list of ServiceMonitors.
|
||||
// +k8s:openapi-gen=true
|
||||
type ServiceMonitorList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
// List of ServiceMonitors
|
||||
Items []*ServiceMonitor `json:"items"`
|
||||
}
|
||||
|
||||
// PrometheusRuleList is a list of PrometheusRules.
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusRuleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
// List of Rules
|
||||
Items []*PrometheusRule `json:"items"`
|
||||
}
|
||||
|
||||
// PrometheusRule defines alerting rules for a Prometheus instance
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusRule struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object’s metadata. More info:
|
||||
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of desired alerting rule definitions for Prometheus.
|
||||
Spec PrometheusRuleSpec `json:"spec"`
|
||||
}
|
||||
|
||||
// PrometheusRuleSpec contains specification parameters for a Rule.
|
||||
// +k8s:openapi-gen=true
|
||||
type PrometheusRuleSpec struct {
|
||||
// Content of Prometheus rule file
|
||||
Groups []RuleGroup `json:"groups,omitempty"`
|
||||
}
|
||||
|
||||
// RuleGroup and Rule are copied instead of vendored because the
|
||||
// upstream Prometheus struct definitions don't have json struct tags.
|
||||
|
||||
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
|
||||
// +k8s:openapi-gen=true
|
||||
type RuleGroup struct {
|
||||
Name string `json:"name"`
|
||||
Interval string `json:"interval,omitempty"`
|
||||
Rules []Rule `json:"rules"`
|
||||
}
|
||||
|
||||
// Rule describes an alerting or recording rule.
|
||||
// +k8s:openapi-gen=true
|
||||
type Rule struct {
|
||||
Record string `json:"record,omitempty"`
|
||||
Alert string `json:"alert,omitempty"`
|
||||
Expr intstr.IntOrString `json:"expr"`
|
||||
For string `json:"for,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
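// An illustrative PrometheusRuleSpec (example values) with a single group and
// one alerting rule; Expr is an intstr.IntOrString, so string expressions are
// wrapped with intstr.FromString.
var exampleRuleSpec = PrometheusRuleSpec{
	Groups: []RuleGroup{{
		Name:     "example.rules",
		Interval: "30s",
		Rules: []Rule{{
			Alert:  "ExampleAlwaysFiring",
			Expr:   intstr.FromString("vector(1)"),
			For:    "5m",
			Labels: map[string]string{"severity": "none"},
		}},
	}},
}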
|
||||
|
||||
// Alertmanager describes an Alertmanager cluster.
|
||||
// +k8s:openapi-gen=true
|
||||
type Alertmanager struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object’s metadata. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
// +k8s:openapi-gen=false
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the desired behavior of the Alertmanager cluster. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
Spec AlertmanagerSpec `json:"spec"`
|
||||
// Most recent observed status of the Alertmanager cluster. Read-only. Not
|
||||
// included when requesting from the apiserver, only from the Prometheus
|
||||
// Operator API itself. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
Status *AlertmanagerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
// +k8s:openapi-gen=true
|
||||
type AlertmanagerSpec struct {
|
||||
// Standard object’s metadata. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
// Metadata Labels and Annotations get propagated to the Alertmanager pods.
|
||||
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
|
||||
// Version the cluster should be on.
|
||||
Version string `json:"version,omitempty"`
|
||||
// Tag of Alertmanager container image to be deployed. Defaults to the value of `version`.
|
||||
// Version is ignored if Tag is set.
|
||||
Tag string `json:"tag,omitempty"`
|
||||
// SHA of Alertmanager container image to be deployed. Defaults to the value of `version`.
|
||||
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
|
||||
// Version and Tag are ignored if SHA is set.
|
||||
SHA string `json:"sha,omitempty"`
|
||||
// Base image that is used to deploy pods, without tag.
|
||||
BaseImage string `json:"baseImage,omitempty"`
|
||||
// An optional list of references to secrets in the same namespace
|
||||
// to use for pulling prometheus and alertmanager images from registries
|
||||
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
|
||||
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
||||
// Secrets is a list of Secrets in the same namespace as the Alertmanager
|
||||
// object, which shall be mounted into the Alertmanager Pods.
|
||||
// The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
|
||||
Secrets []string `json:"secrets,omitempty"`
|
||||
// ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager
|
||||
// object, which shall be mounted into the Alertmanager Pods.
|
||||
// The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
|
||||
ConfigMaps []string `json:"configMaps,omitempty"`
|
||||
// Log level for Alertmanager to be configured with.
|
||||
LogLevel string `json:"logLevel,omitempty"`
|
||||
// Size is the expected size of the alertmanager cluster. The controller will
|
||||
// eventually make the size of the running cluster equal to the expected
|
||||
// size.
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
// Time duration Alertmanager shall retain data for. Default is '120h',
|
||||
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
|
||||
Retention string `json:"retention,omitempty"`
|
||||
// Storage is the definition of how storage will be used by the Alertmanager
|
||||
// instances.
|
||||
Storage *StorageSpec `json:"storage,omitempty"`
|
||||
// The external URL the Alertmanager instances will be available under. This is
|
||||
// needed to generate correct URLs, and is required if Alertmanager is not
|
||||
// served from the root of a DNS name.
|
||||
ExternalURL string `json:"externalUrl,omitempty"`
|
||||
// The route prefix Alertmanager registers HTTP handlers for. This is useful,
|
||||
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
|
||||
// and the actual ExternalURL is still true, but the server serves requests
|
||||
// under a different route prefix. For example for use with `kubectl proxy`.
|
||||
RoutePrefix string `json:"routePrefix,omitempty"`
|
||||
// If set to true all actions on the underlying managed objects are not
|
||||
// going to be performed, except for delete actions.
|
||||
Paused bool `json:"paused,omitempty"`
|
||||
// Define which Nodes the Pods are scheduled on.
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
// Define resources requests and limits for single Pods.
|
||||
Resources v1.ResourceRequirements `json:"resources,omitempty"`
|
||||
// If specified, the pod's scheduling constraints.
|
||||
Affinity *v1.Affinity `json:"affinity,omitempty"`
|
||||
// If specified, the pod's tolerations.
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
// SecurityContext holds pod-level security attributes and common container settings.
|
||||
// This defaults to non root user with uid 1000 and gid 2000.
|
||||
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||
// ServiceAccountName is the name of the ServiceAccount to use to run the
|
||||
// Prometheus Pods.
|
||||
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||
// ListenLocal makes the Alertmanager server listen on loopback, so that it
|
||||
// does not bind against the Pod IP. Note this is only for the Alertmanager
|
||||
// UI, not the gossip communication.
|
||||
ListenLocal bool `json:"listenLocal,omitempty"`
|
||||
// Containers allows injecting additional containers. This is meant to
|
||||
// allow adding an authentication proxy to an Alertmanager pod.
|
||||
Containers []v1.Container `json:"containers,omitempty"`
|
||||
// Priority class assigned to the Pods
|
||||
PriorityClassName string `json:"priorityClassName,omitempty"`
|
||||
// AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
|
||||
AdditionalPeers []string `json:"additionalPeers,omitempty"`
|
||||
}
|
||||
|
||||
// AlertmanagerList is a list of Alertmanagers.
|
||||
// +k8s:openapi-gen=true
|
||||
type AlertmanagerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
// List of Alertmanagers
|
||||
Items []Alertmanager `json:"items"`
|
||||
}
|
||||
|
||||
// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not
|
||||
// included when requesting from the apiserver, only from the Prometheus
|
||||
// Operator API itself. More info:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
|
||||
// +k8s:openapi-gen=true
|
||||
type AlertmanagerStatus struct {
|
||||
// Represents whether any actions on the underlying managed objects are
|
||||
// being performed. Only delete actions will be performed.
|
||||
Paused bool `json:"paused"`
|
||||
// Total number of non-terminated pods targeted by this Alertmanager
|
||||
// cluster (their labels match the selector).
|
||||
Replicas int32 `json:"replicas"`
|
||||
// Total number of non-terminated pods targeted by this Alertmanager
|
||||
// cluster that have the desired version spec.
|
||||
UpdatedReplicas int32 `json:"updatedReplicas"`
|
||||
// Total number of available pods (ready for at least minReadySeconds)
|
||||
// targeted by this Alertmanager cluster.
|
||||
AvailableReplicas int32 `json:"availableReplicas"`
|
||||
// Total number of unavailable pods targeted by this Alertmanager cluster.
|
||||
UnavailableReplicas int32 `json:"unavailableReplicas"`
|
||||
}
|
||||
|
||||
// NamespaceSelector is a selector for selecting either all namespaces or a
|
||||
// list of namespaces.
|
||||
// +k8s:openapi-gen=true
|
||||
type NamespaceSelector struct {
|
||||
// Boolean describing whether all namespaces are selected in contrast to a
|
||||
// list restricting them.
|
||||
Any bool `json:"any,omitempty"`
|
||||
// List of namespace names.
|
||||
MatchNames []string `json:"matchNames,omitempty"`
|
||||
|
||||
// TODO(fabxc): this should embed metav1.LabelSelector eventually.
|
||||
// Currently the selector is only used for namespaces which require more complex
|
||||
// implementation to support label selections.
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *Alertmanager) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *AlertmanagerList) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *Prometheus) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *PrometheusList) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *ServiceMonitor) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *ServiceMonitorList) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (f *PrometheusRule) DeepCopyObject() runtime.Object {
|
||||
return f.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyObject implements the runtime.Object interface.
|
||||
func (l *PrometheusRuleList) DeepCopyObject() runtime.Object {
|
||||
return l.DeepCopy()
|
||||
}
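// An illustrative constructor (example values, not part of the vendored file)
// wiring several of the types above into a minimal Prometheus object.
func examplePrometheus() *Prometheus {
	replicas := int32(2)
	return &Prometheus{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "monitoring"},
		Spec: PrometheusSpec{
			Replicas:  &replicas,
			Retention: "24h",
			ServiceMonitorSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"team": "example"},
			},
			Alerting: &AlertingSpec{
				Alertmanagers: []AlertmanagerEndpoints{{
					Namespace: "monitoring",
					Name:      "alertmanager-main",
					Port:      intstr.FromString("web"),
				}},
			},
		},
	}
}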
|
1309
vendor/github.com/coreos/prometheus-operator/pkg/client/monitoring/v1/zz_generated.deepcopy.go
generated
vendored
Normal file
File diff suppressed because it is too large
70
vendor/github.com/emicklei/go-restful/.gitignore
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
restful.html
|
||||
|
||||
*.out
|
||||
|
||||
tmp.prof
|
||||
|
||||
go-restful.test
|
||||
|
||||
examples/restful-basic-authentication
|
||||
|
||||
examples/restful-encoding-filter
|
||||
|
||||
examples/restful-filters
|
||||
|
||||
examples/restful-hello-world
|
||||
|
||||
examples/restful-resource-functions
|
||||
|
||||
examples/restful-serve-static
|
||||
|
||||
examples/restful-user-service
|
||||
|
||||
*.DS_Store
|
||||
examples/restful-user-resource
|
||||
|
||||
examples/restful-multi-containers
|
||||
|
||||
examples/restful-form-handling
|
||||
|
||||
examples/restful-CORS-filter
|
||||
|
||||
examples/restful-options-filter
|
||||
|
||||
examples/restful-curly-router
|
||||
|
||||
examples/restful-cpuprofiler-service
|
||||
|
||||
examples/restful-pre-post-filters
|
||||
|
||||
curly.prof
|
||||
|
||||
examples/restful-NCSA-logging
|
||||
|
||||
examples/restful-html-template
|
||||
|
||||
s.html
|
||||
restful-path-tail
|
6
vendor/github.com/emicklei/go-restful/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
|
||||
script: go test -v
|
223
vendor/github.com/emicklei/go-restful/CHANGES.md
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
Change history of go-restful
|
||||
=
|
||||
2017-02-16
|
||||
- solved issue #304, make operation names unique
|
||||
|
||||
2017-01-30
|
||||
|
||||
[IMPORTANT] For swagger users, change your import statement to:
|
||||
swagger "github.com/emicklei/go-restful-swagger12"
|
||||
|
||||
- moved swagger 1.2 code to go-restful-swagger12
|
||||
- created TAG 2.0.0
|
||||
|
||||
2017-01-27
|
||||
|
||||
- remove defer request body close
|
||||
- expose Dispatch for testing filters and Route functions
|
||||
- swagger response model cannot be array
|
||||
- created TAG 1.0.0
|
||||
|
||||
2016-12-22
|
||||
|
||||
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
|
||||
|
||||
2016-11-26
|
||||
|
||||
- Default change! now use CurlyRouter (was RouterJSR311)
|
||||
- Default change! no more caching of request content
|
||||
- Default change! do not recover from panics
|
||||
|
||||
2016-09-22
|
||||
|
||||
- fix the DefaultRequestContentType feature
|
||||
|
||||
2016-02-14
|
||||
|
||||
- take the quality factor of the Accept header media type into account when deciding the content type of the response
|
||||
- add constructors for custom entity accessors for xml and json
|
||||
|
||||
2015-09-27
|
||||
|
||||
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
|
||||
|
||||
2015-09-25
|
||||
|
||||
- fixed problem with changing Header after WriteHeader (issue 235)
|
||||
|
||||
2015-09-14
|
||||
|
||||
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
|
||||
- added support for custom EntityReaderWriters.
|
||||
|
||||
2015-08-06
|
||||
|
||||
- add support for reading entities from compressed request content
|
||||
- use sync.Pool for compressors of http response and request body
|
||||
- add Description to Parameter for documentation in Swagger UI
|
||||
|
||||
2015-03-20
|
||||
|
||||
- add configurable logging
|
||||
|
||||
2015-03-18
|
||||
|
||||
- if not specified, the Operation is derived from the Route function
|
||||
|
||||
2015-03-17
|
||||
|
||||
- expose Parameter creation functions
|
||||
- make trace logger an interface
|
||||
- fix OPTIONSFilter
|
||||
- customize rendering of ServiceError
|
||||
- JSR311 router now handles wildcards
|
||||
- add Notes to Route
|
||||
|
||||
2014-11-27
|
||||
|
||||
- (api add) PrettyPrint per response. (as proposed in #167)
|
||||
|
||||
2014-11-12
|
||||
|
||||
- (api add) ApiVersion(.) for documentation in Swagger UI
|
||||
|
||||
2014-11-10
|
||||
|
||||
- (api change) struct fields tagged with "description" show up in Swagger UI
|
||||
|
||||
2014-10-31
|
||||
|
||||
- (api change) ReturnsError -> Returns
|
||||
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
|
||||
- fix swagger nested structs
|
||||
- sort Swagger response messages by code
|
||||
|
||||
2014-10-23
|
||||
|
||||
- (api add) ReturnsError allows you to document Http codes in swagger
|
||||
- fixed problem with greedy CurlyRouter
|
||||
- (api add) Access-Control-Max-Age in CORS
|
||||
- add tracing functionality (injectable) for debugging purposes
|
||||
- support JSON parse 64bit int
|
||||
- fix empty parameters for swagger
|
||||
- WebServicesUrl is now optional for swagger
|
||||
- fixed duplicate AccessControlAllowOrigin in CORS
|
||||
- (api change) expose ServeMux in container
|
||||
- (api add) added AllowedDomains in CORS
|
||||
- (api add) ParameterNamed for detailed documentation
|
||||
|
||||
2014-04-16
|
||||
|
||||
- (api add) expose constructor of Request for testing.
|
||||
|
||||
2014-06-27
|
||||
|
||||
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
|
||||
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
|
||||
|
||||
2014-07-03
|
||||
|
||||
- (api add) CORS can be configured with a list of allowed domains
|
||||
|
||||
2014-03-12
|
||||
|
||||
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
|
||||
|
||||
2014-02-26
|
||||
|
||||
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
|
||||
|
||||
2014-02-17
|
||||
|
||||
- (api change) renamed parameter constants (go-lint checks)
|
||||
|
||||
2014-01-10
|
||||
|
||||
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
|
||||
|
||||
2014-01-07
|
||||
|
||||
- (api change) Write* methods in Response now return the error or nil.
|
||||
- added example of serving HTML from a Go template.
|
||||
- fixed comparing Allowed headers in CORS (is now case-insensitive)
|
||||
|
||||
2013-11-13
|
||||
|
||||
- (api add) Response knows how many bytes are written to the response body.
|
||||
|
||||
2013-10-29
|
||||
|
||||
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
|
||||
|
||||
2013-10-04
|
||||
|
||||
- (api add) Response knows what HTTP status has been written
|
||||
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
|
||||
|
||||
2013-09-12
|
||||
|
||||
- (api change) Router interface simplified
|
||||
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
|
||||
|
||||
2013-08-05
|
||||
- add OPTIONS support
|
||||
- add CORS support
|
||||
|
||||
2013-08-27
|
||||
|
||||
- fixed some reported issues (see github)
|
||||
- (api change) deprecated use of WriteError; use WriteErrorString instead
|
||||
|
||||
2014-04-15
|
||||
|
||||
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
|
||||
|
||||
2013-08-08
|
||||
|
||||
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
|
||||
- (api add) the swagger package has be extended to have a UI per container.
|
||||
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
|
||||
- (api add) WriteErrorString to Response
|
||||
|
||||
Important API changes:
|
||||
|
||||
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
|
||||
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
|
||||
|
||||
|
||||
2013-07-06
|
||||
|
||||
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
|
||||
|
||||
2013-06-19
|
||||
|
||||
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
|
||||
|
||||
2013-06-03
|
||||
|
||||
- (api change) removed Dispatcher interface, hide PathExpression
|
||||
- changed receiver names of type functions to be more idiomatic Go
|
||||
|
||||
2013-06-02
|
||||
|
||||
- (optimize) Cache the RegExp compilation of Paths.
|
||||
|
||||
2013-05-22
|
||||
|
||||
- (api add) Added support for request/response filter functions
|
||||
|
||||
2013-05-18
|
||||
|
||||
|
||||
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
|
||||
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
|
||||
|
||||
[2012-11-14 .. 2013-05-18>
|
||||
|
||||
- See https://github.com/emicklei/go-restful/commits
|
||||
|
||||
2012-11-14
|
||||
|
||||
- Initial commit
|
||||
|
||||
|
22
vendor/github.com/emicklei/go-restful/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
7
vendor/github.com/emicklei/go-restful/Makefile
generated
vendored
Normal file
@ -0,0 +1,7 @@
all: test

test:
	go test -v .

ex:
	cd examples && ls *.go | xargs go build -o /tmp/ignore
74
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
Normal file
@ -0,0 +1,74 @@
go-restful
==========
package for building REST-style Web Services using Google Go

[Build Status](https://travis-ci.org/emicklei/go-restful)
[Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)
[GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)

- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)

REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:

- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI

### Example

```Go
ws := new(restful.WebService)
ws.
	Path("/users").
	Consumes(restful.MIME_XML, restful.MIME_JSON).
	Produces(restful.MIME_JSON, restful.MIME_XML)

ws.Route(ws.GET("/{user-id}").To(u.findUser).
	Doc("get a user").
	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
	Writes(User{}))
...

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	...
}
```

[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)

### Features

- Routes for request → function mapping with path parameter (e.g. {id}) support
- Configurable router:
	- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
	- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request → response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip, deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12), [go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration

### Resources

- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)

Type ```git shortlog -s``` for a full list of contributors.

© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
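The README example above defines a WebService but stops short of serving it. The following is a minimal, hedged sketch (not part of the vendored files) of registering such a service on the default container and starting a server; the `User`/`UserResource` types and the handler body are placeholder assumptions.

```Go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// User and UserResource are placeholder types for this sketch.
type User struct {
	ID string `json:"id"`
}

type UserResource struct{}

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	// a real implementation would look the user up; here we just echo the id
	response.WriteEntity(User{ID: id})
}

func main() {
	u := UserResource{}
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{user-id}").To(u.findUser))

	// register on the DefaultContainer, which uses http.DefaultServeMux
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```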
1
vendor/github.com/emicklei/go-restful/Srcfile
generated
vendored
Normal file
@ -0,0 +1 @@
{"SkipDirs": ["examples"]}
10
vendor/github.com/emicklei/go-restful/bench_test.sh
generated
vendored
Normal file
@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out

go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly

#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof
123
vendor/github.com/emicklei/go-restful/compress.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bufio"
	"compress/gzip"
	"compress/zlib"
	"errors"
	"io"
	"net"
	"net/http"
	"strings"
)

// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false

// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
	writer     http.ResponseWriter
	compressor io.WriteCloser
	encoding   string
}

// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
	return c.writer.Header()
}

// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
	c.writer.WriteHeader(status)
}

// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
	if c.isCompressorClosed() {
		return -1, errors.New("Compressing error: tried to write data using closed compressor")
	}
	return c.compressor.Write(bytes)
}

// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
	return c.writer.(http.CloseNotifier).CloseNotify()
}

// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
	if c.isCompressorClosed() {
		return errors.New("Compressing error: tried to close already closed compressor")
	}

	c.compressor.Close()
	if ENCODING_GZIP == c.encoding {
		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
	}
	if ENCODING_DEFLATE == c.encoding {
		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
	}
	// gc hint needed?
	c.compressor = nil
	return nil
}

func (c *CompressingResponseWriter) isCompressorClosed() bool {
	return nil == c.compressor
}

// Hijack implements the Hijacker interface
// This is especially useful when combining Container.EnabledContentEncoding
// in combination with websockets (for instance gorilla/websocket)
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	hijacker, ok := c.writer.(http.Hijacker)
	if !ok {
		return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
	}
	return hijacker.Hijack()
}

// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
	gi := strings.Index(header, ENCODING_GZIP)
	zi := strings.Index(header, ENCODING_DEFLATE)
	// use in order of appearance
	if gi == -1 {
		return zi != -1, ENCODING_DEFLATE
	} else if zi == -1 {
		return gi != -1, ENCODING_GZIP
	} else {
		if gi < zi {
			return true, ENCODING_GZIP
		}
		return true, ENCODING_DEFLATE
	}
}

// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
	c := new(CompressingResponseWriter)
	c.writer = httpWriter
	var err error
	if ENCODING_GZIP == encoding {
		w := currentCompressorProvider.AcquireGzipWriter()
		w.Reset(httpWriter)
		c.compressor = w
		c.encoding = ENCODING_GZIP
	} else if ENCODING_DEFLATE == encoding {
		w := currentCompressorProvider.AcquireZlibWriter()
		w.Reset(httpWriter)
		c.compressor = w
		c.encoding = ENCODING_DEFLATE
	} else {
		return nil, errors.New("Unknown encoding:" + encoding)
	}
	return c, err
}
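As a usage note (not part of the vendored file): the compressing writer above is only engaged when content encoding is enabled on a container. A minimal sketch, assuming only the public API shown in this diff; the `/ping` route is a placeholder.

```Go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService) // placeholder service for the sketch
	ws.Route(ws.GET("/ping").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("pong"))
	}))

	restful.DefaultContainer.EnableContentEncoding(true) // opt in; the default is false
	restful.Add(ws)

	// a client sending "Accept-Encoding: gzip, deflate" now receives a compressed body
	// written through CompressingResponseWriter (first encoding in order of appearance wins).
	http.ListenAndServe(":8080", nil)
}
```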
103
vendor/github.com/emicklei/go-restful/compressor_cache.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/gzip"
	"compress/zlib"
)

// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
	gzipWriters     chan *gzip.Writer
	gzipReaders     chan *gzip.Reader
	zlibWriters     chan *zlib.Writer
	writersCapacity int
	readersCapacity int
}

// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
	b := &BoundedCachedCompressors{
		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
		writersCapacity: writersCapacity,
		readersCapacity: readersCapacity,
	}
	for ix := 0; ix < writersCapacity; ix++ {
		b.gzipWriters <- newGzipWriter()
		b.zlibWriters <- newZlibWriter()
	}
	for ix := 0; ix < readersCapacity; ix++ {
		b.gzipReaders <- newGzipReader()
	}
	return b
}

// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
	var writer *gzip.Writer
	select {
	case writer, _ = <-b.gzipWriters:
	default:
		// return a new unmanaged one
		writer = newGzipWriter()
	}
	return writer
}

// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
	// forget the unmanaged ones
	if len(b.gzipWriters) < b.writersCapacity {
		b.gzipWriters <- w
	}
}

// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
	var reader *gzip.Reader
	select {
	case reader, _ = <-b.gzipReaders:
	default:
		// return a new unmanaged one
		reader = newGzipReader()
	}
	return reader
}

// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
	// forget the unmanaged ones
	if len(b.gzipReaders) < b.readersCapacity {
		b.gzipReaders <- r
	}
}

// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
	var writer *zlib.Writer
	select {
	case writer, _ = <-b.zlibWriters:
	default:
		// return a new unmanaged one
		writer = newZlibWriter()
	}
	return writer
}

// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
	// forget the unmanaged ones
	if len(b.zlibWriters) < b.writersCapacity {
		b.zlibWriters <- w
	}
}
91
vendor/github.com/emicklei/go-restful/compressor_pools.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"compress/gzip"
	"compress/zlib"
	"sync"
)

// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
type SyncPoolCompessors struct {
	GzipWriterPool *sync.Pool
	GzipReaderPool *sync.Pool
	ZlibWriterPool *sync.Pool
}

// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
func NewSyncPoolCompessors() *SyncPoolCompessors {
	return &SyncPoolCompessors{
		GzipWriterPool: &sync.Pool{
			New: func() interface{} { return newGzipWriter() },
		},
		GzipReaderPool: &sync.Pool{
			New: func() interface{} { return newGzipReader() },
		},
		ZlibWriterPool: &sync.Pool{
			New: func() interface{} { return newZlibWriter() },
		},
	}
}

func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
	return s.GzipWriterPool.Get().(*gzip.Writer)
}

func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
	s.GzipWriterPool.Put(w)
}

func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
	return s.GzipReaderPool.Get().(*gzip.Reader)
}

func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
	s.GzipReaderPool.Put(r)
}

func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
	return s.ZlibWriterPool.Get().(*zlib.Writer)
}

func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
	s.ZlibWriterPool.Put(w)
}

func newGzipWriter() *gzip.Writer {
	// create with an empty bytes writer; it will be replaced before using the gzipWriter
	writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
	if err != nil {
		panic(err.Error())
	}
	return writer
}

func newGzipReader() *gzip.Reader {
	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
	// we can safely use currentCompressProvider because it is set on package initialization.
	w := currentCompressorProvider.AcquireGzipWriter()
	defer currentCompressorProvider.ReleaseGzipWriter(w)
	b := new(bytes.Buffer)
	w.Reset(b)
	w.Flush()
	w.Close()
	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		panic(err.Error())
	}
	return reader
}

func newZlibWriter() *zlib.Writer {
	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
	if err != nil {
		panic(err.Error())
	}
	return writer
}
54
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/gzip"
	"compress/zlib"
)

// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
	// Returns a *gzip.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireGzipWriter() *gzip.Writer

	// Releases an acquired *gzip.Writer.
	ReleaseGzipWriter(w *gzip.Writer)

	// Returns a *gzip.Reader which needs to be released later.
	AcquireGzipReader() *gzip.Reader

	// Releases an acquired *gzip.Reader.
	ReleaseGzipReader(w *gzip.Reader)

	// Returns a *zlib.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireZlibWriter() *zlib.Writer

	// Releases an acquired *zlib.Writer.
	ReleaseZlibWriter(w *zlib.Writer)
}

// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider

func init() {
	currentCompressorProvider = NewSyncPoolCompessors()
}

// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
	return currentCompressorProvider
}

// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
	if p == nil {
		panic("cannot set compressor provider to nil")
	}
	currentCompressorProvider = p
}
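For illustration (a hedged fragment, not part of the vendored sources): the provider can be swapped at startup, for instance to the bounded cache from compressor_cache.go; the capacities below are arbitrary example values.

```Go
// somewhere in program initialization, before serving requests:
// swap the default sync.Pool-backed provider for the bounded cache;
// 20 writers / 20 readers are arbitrary example capacities.
restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))
```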
30
vendor/github.com/emicklei/go-restful/constants.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

const (
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	HEADER_Allow                         = "Allow"
	HEADER_Accept                        = "Accept"
	HEADER_Origin                        = "Origin"
	HEADER_ContentType                   = "Content-Type"
	HEADER_LastModified                  = "Last-Modified"
	HEADER_AcceptEncoding                = "Accept-Encoding"
	HEADER_ContentEncoding               = "Content-Encoding"
	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"

	ENCODING_GZIP    = "gzip"
	ENCODING_DEFLATE = "deflate"
)
366
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
Normal file
@ -0,0 +1,366 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
|
||||
// The requests are further dispatched to routes of WebServices using a RouteSelector
|
||||
type Container struct {
|
||||
webServicesLock sync.RWMutex
|
||||
webServices []*WebService
|
||||
ServeMux *http.ServeMux
|
||||
isRegisteredOnRoot bool
|
||||
containerFilters []FilterFunction
|
||||
doNotRecover bool // default is true
|
||||
recoverHandleFunc RecoverHandleFunction
|
||||
serviceErrorHandleFunc ServiceErrorHandleFunction
|
||||
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
|
||||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
ServeMux: http.NewServeMux(),
|
||||
isRegisteredOnRoot: false,
|
||||
containerFilters: []FilterFunction{},
|
||||
doNotRecover: true,
|
||||
recoverHandleFunc: logStackOnRecover,
|
||||
serviceErrorHandleFunc: writeServiceError,
|
||||
router: CurlyRouter{},
|
||||
contentEncodingEnabled: false}
|
||||
}
|
||||
|
||||
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
|
||||
// The first argument is what recover() returns. The second must be used to communicate an error response.
|
||||
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
|
||||
|
||||
// RecoverHandler changes the default function (logStackOnRecover) to be called
|
||||
// when a panic is detected. DoNotRecover must be have its default value (=false).
|
||||
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
|
||||
c.recoverHandleFunc = handler
|
||||
}
|
||||
|
||||
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
|
||||
// The first argument is the service error, the second is the request that resulted in the error and
|
||||
// the third must be used to communicate an error response.
|
||||
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
|
||||
|
||||
// ServiceErrorHandler changes the default function (writeServiceError) to be called
|
||||
// when a ServiceError is detected.
|
||||
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
|
||||
c.serviceErrorHandleFunc = handler
|
||||
}
|
||||
|
||||
// DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
// If set to true, Route functions are responsible for handling any error situation.
|
||||
// Default value is true.
|
||||
func (c *Container) DoNotRecover(doNot bool) {
|
||||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently CurlyRouter)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
||||
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
|
||||
func (c *Container) EnableContentEncoding(enabled bool) {
|
||||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If not registered on root then add specific mapping
|
||||
if !c.isRegisteredOnRoot {
|
||||
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
// addHandler may set a new HandleFunc for the serveMux
|
||||
// this function must run inside the critical region protected by the webServicesLock.
|
||||
// returns true if the function was registered on root ("/")
|
||||
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
|
||||
pattern := fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
serveMux.HandleFunc("/", c.dispatch)
|
||||
return true
|
||||
}
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
alreadyMapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
serveMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
serveMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Printf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// build a new ServeMux and re-register all WebServices
|
||||
newServeMux := http.NewServeMux()
|
||||
newServices := []*WebService{}
|
||||
newIsRegisteredOnRoot := false
|
||||
for _, each := range c.webServices {
|
||||
if each.rootPath != ws.rootPath {
|
||||
// If not registered on root then add specific mapping
|
||||
if !newIsRegisteredOnRoot {
|
||||
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
|
||||
}
|
||||
newServices = append(newServices, each)
|
||||
}
|
||||
}
|
||||
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
|
||||
return nil
|
||||
}
|
||||
|
||||
// logStackOnRecover is the default RecoverHandleFunction and is called
|
||||
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
|
||||
// Default implementation logs the stacktrace and writes the stacktrace on the response.
|
||||
// This may be a security issue as it exposes sourcecode information.
|
||||
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i += 1 {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
log.Print(buffer.String())
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
httpWriter.Write(buffer.Bytes())
|
||||
}
|
||||
|
||||
// writeServiceError is the default ServiceErrorHandleFunction and is called
|
||||
// when a ServiceError is returned during route selection. Default implementation
|
||||
// calls resp.WriteErrorString(err.Code, err.Message)
|
||||
func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
||||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
if httpWriter == nil {
|
||||
panic("httpWriter cannot be nil")
|
||||
}
|
||||
if httpRequest == nil {
|
||||
panic("httpRequest cannot be nil")
|
||||
}
|
||||
c.dispatch(httpWriter, httpRequest)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
writer := httpWriter
|
||||
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Instal panic recovery unless told otherwise
|
||||
if !c.doNotRecover { // catch all for 500 response
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
c.recoverHandleFunc(r, writer)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Detect if compression is needed
|
||||
// assume without compression, test for override
|
||||
if c.contentEncodingEnabled {
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("[restful] unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
// Find best match Route ; err is non nil if no match was found
|
||||
var webService *WebService
|
||||
var route *Route
|
||||
var err error
|
||||
func() {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
webService, route, err = c.router.SelectRoute(
|
||||
c.webServices,
|
||||
httpRequest)
|
||||
}()
|
||||
if err != nil {
|
||||
// a non-200 response has already been written
|
||||
// run container filters anyway ; they should not touch the response...
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
switch err.(type) {
|
||||
case ServiceError:
|
||||
ser := err.(ServiceError)
|
||||
c.serviceErrorHandleFunc(ser, req, resp)
|
||||
}
|
||||
// TODO
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
|
||||
return
|
||||
}
|
||||
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
|
||||
// pass through filters (if any)
|
||||
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
|
||||
// compose filter chain
|
||||
allFilters := []FilterFunction{}
|
||||
allFilters = append(allFilters, c.containerFilters...)
|
||||
allFilters = append(allFilters, webService.filters...)
|
||||
allFilters = append(allFilters, route.Filters...)
|
||||
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
|
||||
// handle request by route after passing all filters
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}}
|
||||
chain.ProcessFilter(wrappedRequest, wrappedResponse)
|
||||
} else {
|
||||
// no filters, handle request by route
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {}
|
||||
func fixedPrefixPath(pathspec string) string {
|
||||
varBegin := strings.Index(pathspec, "{")
|
||||
if -1 == varBegin {
|
||||
return pathspec
|
||||
}
|
||||
return pathspec[:varBegin]
|
||||
}
|
||||
|
||||
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
|
||||
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
|
||||
}
|
||||
|
||||
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
|
||||
func (c *Container) Handle(pattern string, handler http.Handler) {
|
||||
c.ServeMux.Handle(pattern, handler)
|
||||
}
|
||||
|
||||
// HandleWithFilter registers the handler for the given pattern.
|
||||
// Container's filter chain is applied for handler.
|
||||
// If a handler already exists for pattern, HandleWithFilter panics.
|
||||
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
|
||||
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
|
||||
if len(c.containerFilters) == 0 {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
|
||||
}
|
||||
|
||||
c.Handle(pattern, http.HandlerFunc(f))
|
||||
}
|
||||
|
||||
// Filter appends a container FilterFunction. These are called before dispatching
|
||||
// a http.Request to a WebService from the container
|
||||
func (c *Container) Filter(filter FilterFunction) {
|
||||
c.containerFilters = append(c.containerFilters, filter)
|
||||
}
|
||||
|
||||
// RegisteredWebServices returns the collections of added WebServices
|
||||
func (c *Container) RegisteredWebServices() []*WebService {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
result := make([]*WebService, len(c.webServices))
|
||||
for ix := range c.webServices {
|
||||
result[ix] = c.webServices[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
|
||||
func (c *Container) computeAllowedMethods(req *Request) []string {
|
||||
// Go through all RegisteredWebServices() and all its Routes to collect the options
|
||||
methods := []string{}
|
||||
requestPath := req.Request.URL.Path
|
||||
for _, ws := range c.RegisteredWebServices() {
|
||||
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
finalMatch := matches[len(matches)-1]
|
||||
for _, rt := range ws.Routes() {
|
||||
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’.
|
||||
methods = append(methods, rt.Method)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// methods = append(methods, "OPTIONS") not sure about this
|
||||
return methods
|
||||
}
|
||||
|
||||
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
|
||||
// It is basic because no parameter or (produces) content-type information is given.
|
||||
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
|
||||
resp := NewResponse(httpWriter)
|
||||
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
|
||||
return NewRequest(httpRequest), resp
|
||||
}
|
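A small usage sketch of the Container API defined above (not part of the vendored file); only functions shown in this diff are used, and the `/healthz` route is a placeholder.

```Go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService) // placeholder service for the sketch
	ws.Path("/healthz")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("ok"))
	}))

	// run the service on its own Container (and ServeMux) instead of the DefaultContainer
	container := restful.NewContainer()
	container.EnableContentEncoding(true) // optional gzip/deflate of responses
	container.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		// container-level filter: runs before dispatching to any WebService
		chain.ProcessFilter(req, resp)
	})
	container.Add(ws)

	// Container implements http.Handler through ServeHTTP
	server := &http.Server{Addr: ":8081", Handler: container}
	server.ListenAndServe()
}
```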
202
vendor/github.com/emicklei/go-restful/cors_filter.go
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
|
||||
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
|
||||
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
|
||||
//
|
||||
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
|
||||
// http://enable-cors.org/server.html
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
type CrossOriginResourceSharing struct {
|
||||
ExposeHeaders []string // list of Header names
|
||||
AllowedHeaders []string // list of Header names
|
||||
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
|
||||
AllowedMethods []string
|
||||
MaxAge int // number of seconds before requiring new Options request
|
||||
CookiesAllowed bool
|
||||
Container *Container
|
||||
|
||||
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
|
||||
}
|
||||
|
||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
|
||||
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
|
||||
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if len(origin) == 0 {
|
||||
if trace {
|
||||
traceLogger.Print("no Http header Origin set")
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
|
||||
if trace {
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if req.Request.Method != "OPTIONS" {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
|
||||
c.doPreflightRequest(req, resp)
|
||||
} else {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
|
||||
c.setOptionsHeaders(req, resp)
|
||||
// continue processing the response
|
||||
}
|
||||
|
||||
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
|
||||
if len(c.AllowedMethods) == 0 {
|
||||
if c.Container == nil {
|
||||
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
|
||||
} else {
|
||||
c.AllowedMethods = c.Container.computeAllowedMethods(req)
|
||||
}
|
||||
}
|
||||
|
||||
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
|
||||
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestMethod,
|
||||
acrm,
|
||||
c.AllowedMethods)
|
||||
}
|
||||
return
|
||||
}
|
||||
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
||||
if len(acrhs) > 0 {
|
||||
for _, each := range strings.Split(acrhs, ",") {
|
||||
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestHeaders,
|
||||
acrhs,
|
||||
c.AllowedHeaders)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
|
||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
|
||||
c.setOptionsHeaders(req, resp)
|
||||
|
||||
// return http 200 response, no body
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
|
||||
c.checkAndSetExposeHeaders(resp)
|
||||
c.setAllowOriginHeader(req, resp)
|
||||
c.checkAndSetAllowCredentials(resp)
|
||||
if c.MaxAge > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
||||
if len(origin) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(c.AllowedDomains) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
allowed := false
|
||||
for _, domain := range c.AllowedDomains {
|
||||
if domain == origin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
if len(c.allowedOriginPatterns) == 0 {
|
||||
// compile allowed domains to allowed origin patterns
|
||||
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
c.allowedOriginPatterns = allowedOriginRegexps
|
||||
}
|
||||
|
||||
for _, pattern := range c.allowedOriginPatterns {
|
||||
if allowed = pattern.MatchString(origin); allowed {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allowed
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if c.isOriginAllowed(origin) {
|
||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
|
||||
if len(c.ExposeHeaders) > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
|
||||
if c.CookiesAllowed {
|
||||
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
|
||||
for _, each := range allowedMethods {
|
||||
if each == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
|
||||
for _, each := range c.AllowedHeaders {
|
||||
if strings.ToLower(each) == strings.ToLower(header) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Take a list of strings and compile them into a list of regular expressions.
|
||||
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
|
||||
regexps := []*regexp.Regexp{}
|
||||
for _, regexpStr := range regexpStrings {
|
||||
r, err := regexp.Compile(regexpStr)
|
||||
if err != nil {
|
||||
return regexps, err
|
||||
}
|
||||
regexps = append(regexps, r)
|
||||
}
|
||||
return regexps, nil
|
||||
}
|
2
vendor/github.com/emicklei/go-restful/coverage.sh
generated
vendored
Normal file
@ -0,0 +1,2 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out
164
vendor/github.com/emicklei/go-restful/curly.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
|
||||
type CurlyRouter struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (c CurlyRouter) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
|
||||
|
||||
requestTokens := tokenizePath(httpRequest.URL.Path)
|
||||
|
||||
detectedService := c.detectWebService(requestTokens, webServices)
|
||||
if detectedService == nil {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
|
||||
}
|
||||
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
|
||||
if len(candidateRoutes) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
|
||||
}
|
||||
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
|
||||
if selectedRoute == nil {
|
||||
return detectedService, nil, err
|
||||
}
|
||||
return detectedService, selectedRoute, nil
|
||||
}
|
||||
|
||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := sortableCurlyRoutes{}
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(sort.Reverse(candidates))
|
||||
return candidates
|
||||
}
|
||||
|
||||
// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
|
||||
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
|
||||
if len(routeTokens) < len(requestTokens) {
|
||||
// proceed in matching only if last routeToken is wildcard
|
||||
count := len(routeTokens)
|
||||
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
|
||||
return false, 0, 0
|
||||
}
|
||||
// proceed
|
||||
}
|
||||
for i, routeToken := range routeTokens {
|
||||
if i == len(requestTokens) {
|
||||
// reached end of request path
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if strings.HasPrefix(routeToken, "{") {
|
||||
paramCount++
|
||||
if colon := strings.Index(routeToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
|
||||
if !matchesToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
if matchesRemainder {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else { // no { prefix
|
||||
if requestToken != routeToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
staticCount++
|
||||
}
|
||||
}
|
||||
return true, paramCount, staticCount
|
||||
}
|
||||
|
||||
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
|
||||
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
|
||||
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
|
||||
regPart := routeToken[colon+1 : len(routeToken)-1]
|
||||
if regPart == "*" {
|
||||
if trace {
|
||||
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
|
||||
}
|
||||
return true, true
|
||||
}
|
||||
matched, err := regexp.MatchString(regPart, requestToken)
|
||||
return (matched && err == nil), false
|
||||
}
|
||||
|
||||
var jsr311Router = RouterJSR311{}
|
||||
|
||||
// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type
|
||||
// headers of the Request. See also RouterJSR311 in jsr311.go
|
||||
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
|
||||
// tracing is done inside detectRoute
|
||||
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
|
||||
}
|
||||
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
}
|
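For context (a hedged fragment, not part of the vendored file): CurlyRouter is what makes regular-expression and wildcard path parameters work, matched by regularMatchesPathToken above. The path patterns come from the README and doc.go; `ws`, `findPerson`, and `serveStatic` are placeholder assumptions.

```Go
// {name:[A-Z][A-Z]} restricts the parameter with a Go regular expression,
// {subpath:*} greedily matches the remaining tail of the path.
ws.Route(ws.GET("/persons/{name:[A-Z][A-Z]}").To(findPerson)) // regexp-constrained parameter
ws.Route(ws.GET("/static/{subpath:*}").To(serveStatic))       // greedy tail ("wildcard") parameter
```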
52
vendor/github.com/emicklei/go-restful/curly_route.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
	route       Route
	paramCount  int
	staticCount int
}

type sortableCurlyRoutes []curlyRoute

func (s *sortableCurlyRoutes) add(route curlyRoute) {
	*s = append(*s, route)
}

func (s sortableCurlyRoutes) routes() (routes []Route) {
	for _, each := range s {
		routes = append(routes, each.route) // TODO change return type
	}
	return routes
}

func (s sortableCurlyRoutes) Len() int {
	return len(s)
}
func (s sortableCurlyRoutes) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s sortableCurlyRoutes) Less(i, j int) bool {
	ci := s[i]
	cj := s[j]

	// primary key
	if ci.staticCount < cj.staticCount {
		return true
	}
	if ci.staticCount > cj.staticCount {
		return false
	}
	// secondary key
	if ci.paramCount < cj.paramCount {
		return true
	}
	if ci.paramCount > cj.paramCount {
		return false
	}
	return ci.route.Path < cj.route.Path
}
185
vendor/github.com/emicklei/go-restful/doc.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
/*
|
||||
Package restful , a lean package for creating REST-style WebServices without magic.
|
||||
|
||||
WebServices and Routes
|
||||
|
||||
A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls.
|
||||
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
|
||||
WebServices must be added to a container (see below) in order to handler Http requests from a server.
|
||||
|
||||
A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
|
||||
This package has the logic to find the best matching Route and if found, call its Function.
|
||||
|
||||
ws := new(restful.WebService)
|
||||
ws.
|
||||
Path("/users").
|
||||
Consumes(restful.MIME_JSON, restful.MIME_XML).
|
||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
||||
|
||||
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
|
||||
|
||||
...
|
||||
|
||||
// GET http://localhost:8080/users/1
|
||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
||||
id := request.PathParameter("user-id")
|
||||
...
|
||||
}
|
||||
|
||||
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
|
||||
|
||||
Regular expression matching Routes
|
||||
|
||||
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
|
||||
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
|
||||
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
|
||||
This feature requires the use of a CurlyRouter.
|
||||
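For instance, the following route only accepts numeric values for the "user-id" parameter (an illustrative sketch that reuses the findUser handler shown above):

ws.Route(ws.GET("/{user-id:[0-9]+}").To(u.findUser))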
|
||||
Containers
|
||||
|
||||
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
|
||||
Using the statements restful.Add(...) and restful.Filter(...) will register WebServices and Filters with the Default Container.
|
||||
The Default container of go-restful uses the http.DefaultServeMux.
|
||||
You can create your own Container and create a new http.Server for that particular container.
|
||||
|
||||
container := restful.NewContainer()
|
||||
server := &http.Server{Addr: ":8081", Handler: container}
|
||||
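The server is then started as usual (an illustrative addition; log and http refer to the standard library packages):

log.Fatal(server.ListenAndServe())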
|
||||
Filters
|
||||
|
||||
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
|
||||
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
|
||||
In the restful package there are three hooks into the request,response flow where filters can be added.
|
||||
Each filter must define a FilterFunction:
|
||||
|
||||
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
|
||||
|
||||
Use the following statement to pass the request,response pair to the next filter or RouteFunction
|
||||
|
||||
chain.ProcessFilter(req, resp)
|
||||
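A minimal logging filter might look like this (an illustrative sketch; the name globalLogging is not part of the package and log refers to the standard library):

func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL)
	chain.ProcessFilter(req, resp) // pass control to the next filter or the RouteFunction
}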
|
||||
Container Filters
|
||||
|
||||
These are processed before any registered WebService.
|
||||
|
||||
// install a (global) filter for the default container (processed before any webservice)
|
||||
restful.Filter(globalLogging)
|
||||
|
||||
WebService Filters
|
||||
|
||||
These are processed before any Route of a WebService.
|
||||
|
||||
// install a webservice filter (processed before any route)
|
||||
ws.Filter(webserviceLogging).Filter(measureTime)
|
||||
|
||||
|
||||
Route Filters
|
||||
|
||||
These are processed before calling the function associated with the Route.
|
||||
|
||||
// install 2 chained route filters (processed before calling findUser)
|
||||
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
|
||||
|
||||
Response Encoding
|
||||
|
||||
Two encodings are supported: gzip and deflate. To enable this for all responses:
|
||||
|
||||
restful.DefaultContainer.EnableContentEncoding(true)
|
||||
|
||||
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
|
||||
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
|
||||
|
||||
OPTIONS support
|
||||
|
||||
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
|
||||
|
||||
Filter(OPTIONSFilter())
|
||||
|
||||
CORS
|
||||
|
||||
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
|
||||
|
||||
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
Error Handling
|
||||
|
||||
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
|
||||
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
|
||||
|
||||
400: Bad Request
|
||||
|
||||
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
|
||||
|
||||
404: Not Found
|
||||
|
||||
Despite a valid URI, the resource requested may not be available.
|
||||
|
||||
500: Internal Server Error
|
||||
|
||||
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
|
||||
|
||||
405: Method Not Allowed
|
||||
|
||||
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
|
||||
|
||||
406: Not Acceptable
|
||||
|
||||
The request does not have or has an unknown Accept Header set for this operation.
|
||||
|
||||
415: Unsupported Media Type
|
||||
|
||||
The request does not have or has an unknown Content-Type Header set for this operation.
|
||||
|
||||
ServiceError
|
||||
|
||||
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
|
||||
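For example (an illustrative sketch using the WriteServiceError and NewError helpers of this package):

response.WriteServiceError(http.StatusNotFound, restful.NewError(http.StatusNotFound, "user could not be found"))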
|
||||
Performance options
|
||||
|
||||
This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
|
||||
|
||||
restful.DefaultContainer.DoNotRecover(false)
|
||||
|
||||
DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
If set to false, the container will recover from panics.
|
||||
The default value is true.
|
||||
|
||||
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
|
||||
|
||||
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
|
||||
Because writers are expensive structures, performance can be improved further by using a preloaded cache. You can also inject your own implementation.
|
||||
|
||||
Troubleshooting
|
||||
|
||||
This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
|
||||
Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger instance), such as:
|
||||
|
||||
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
|
||||
|
||||
Logging
|
||||
|
||||
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
|
||||
uses the standard library `log` package and logs to stderr. Different logging packages are supported as
|
||||
long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
|
||||
preferred package is simple.
|
||||
|
||||
Resources
|
||||
|
||||
[project]: https://github.com/emicklei/go-restful
|
||||
|
||||
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
|
||||
|
||||
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
|
||||
|
||||
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
|
||||
|
||||
(c) 2012-2015, http://ernestmicklei.com. MIT License
|
||||
*/
|
||||
package restful
|
163
vendor/github.com/emicklei/go-restful/entity_accessors.go
generated
vendored
Normal file
163
vendor/github.com/emicklei/go-restful/entity_accessors.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
|
||||
type EntityReaderWriter interface {
|
||||
// Read a serialized version of the value from the request.
|
||||
// The Request may have a decompressing reader. Depends on Content-Encoding.
|
||||
Read(req *Request, v interface{}) error
|
||||
|
||||
// Write a serialized version of the value on the response.
|
||||
// The Response may have a compressing writer. Depends on Accept-Encoding.
|
||||
// status should be a valid Http Status code
|
||||
Write(resp *Response, status int, v interface{}) error
|
||||
}
|
||||
|
||||
// entityAccessRegistry is a singleton
|
||||
var entityAccessRegistry = &entityReaderWriters{
|
||||
protection: new(sync.RWMutex),
|
||||
accessors: map[string]EntityReaderWriter{},
|
||||
}
|
||||
|
||||
// entityReaderWriters associates MIME to an EntityReaderWriter
|
||||
type entityReaderWriters struct {
|
||||
protection *sync.RWMutex
|
||||
accessors map[string]EntityReaderWriter
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
|
||||
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
|
||||
}
|
||||
|
||||
// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
|
||||
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
|
||||
entityAccessRegistry.protection.Lock()
|
||||
defer entityAccessRegistry.protection.Unlock()
|
||||
entityAccessRegistry.accessors[mime] = erw
|
||||
}
|
||||
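// For illustration (not part of the original file): a service that produces a
// vendor-specific JSON MIME type could reuse the JSON accessor for it, e.g.
//
//	RegisterEntityAccessor("application/vnd.example+json", NewEntityAccessorJSON("application/vnd.example+json"))
//
// The MIME type string used here is only an example.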
|
||||
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
|
||||
// This package is already initialized with such an accessor using the MIME_JSON contentType.
|
||||
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
|
||||
return entityJSONAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
|
||||
// This package is already initialized with such an accessor using the MIME_XML contentType.
|
||||
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
|
||||
return entityXMLAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// accessorAt returns the registered ReaderWriter for this MIME type.
|
||||
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
|
||||
r.protection.RLock()
|
||||
defer r.protection.RUnlock()
|
||||
er, ok := r.accessors[mime]
|
||||
if !ok {
|
||||
// retry with reverse lookup
|
||||
// more expensive but we are in an exceptional situation anyway
|
||||
for k, v := range r.accessors {
|
||||
if strings.Contains(mime, k) {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return er, ok
|
||||
}
|
||||
|
||||
// entityXMLAccess is a EntityReaderWriter for XML encoding
|
||||
type entityXMLAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshalls the value from XML
|
||||
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
|
||||
return xml.NewDecoder(req.Request.Body).Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to XML and sets the Content-Type Header.
|
||||
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeXML(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeXML marshals the value to XML and sets the Content-Type Header.
|
||||
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := xml.MarshalIndent(v, " ", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write([]byte(xml.Header))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return xml.NewEncoder(resp).Encode(v)
|
||||
}
|
||||
|
||||
// entityJSONAccess is a EntityReaderWriter for JSON encoding
|
||||
type entityJSONAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshalls the value from JSON
|
||||
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
|
||||
decoder := json.NewDecoder(req.Request.Body)
|
||||
decoder.UseNumber()
|
||||
return decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to JSON and sets the Content-Type Header.
|
||||
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeJSON(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeJSON marshals the value to JSON and sets the Content-Type Header.
|
||||
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := json.MarshalIndent(v, " ", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return json.NewEncoder(resp).Encode(v)
|
||||
}
|
35
vendor/github.com/emicklei/go-restful/filter.go
generated
vendored
Normal file
35
vendor/github.com/emicklei/go-restful/filter.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
|
||||
type FilterChain struct {
|
||||
Filters []FilterFunction // ordered list of FilterFunction
|
||||
Index int // index into filters that is currently in progress
|
||||
Target RouteFunction // function to call after passing all filters
|
||||
}
|
||||
|
||||
// ProcessFilter passes the request,response pair through the next of Filters.
|
||||
// Each filter can decide to proceed to the next Filter or handle the Response itself.
|
||||
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
|
||||
if f.Index < len(f.Filters) {
|
||||
f.Index++
|
||||
f.Filters[f.Index-1](request, response, f)
|
||||
} else {
|
||||
f.Target(request, response)
|
||||
}
|
||||
}
|
||||
|
||||
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
|
||||
type FilterFunction func(*Request, *Response, *FilterChain)
|
||||
|
||||
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
|
||||
// See examples/restful-no-cache-filter.go for usage
|
||||
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
|
||||
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
|
||||
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
|
||||
resp.Header().Set("Expires", "0") // Proxies.
|
||||
chain.ProcessFilter(req, resp)
|
||||
}
|
248
vendor/github.com/emicklei/go-restful/jsr311.go
generated
vendored
Normal file
248
vendor/github.com/emicklei/go-restful/jsr311.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
|
||||
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
|
||||
// RouterJSR311 implements the Router interface.
|
||||
// Concept of locators is not implemented.
|
||||
type RouterJSR311 struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (r RouterJSR311) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
|
||||
|
||||
// Identify the root resource class (WebService)
|
||||
dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
|
||||
if err != nil {
|
||||
return nil, nil, NewError(http.StatusNotFound, "")
|
||||
}
|
||||
// Obtain the set of candidate methods (Routes)
|
||||
routes := r.selectRoutes(dispatcher, finalMatch)
|
||||
if len(routes) == 0 {
|
||||
return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
|
||||
// Identify the method (Route) that will handle the request
|
||||
route, ok := r.detectRoute(routes, httpRequest)
|
||||
return dispatcher, route, ok
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
|
||||
// http method
|
||||
methodOk := []Route{}
|
||||
for _, each := range routes {
|
||||
if httpRequest.Method == each.Method {
|
||||
methodOk = append(methodOk, each)
|
||||
}
|
||||
}
|
||||
if len(methodOk) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
|
||||
}
|
||||
return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
|
||||
}
|
||||
inputMediaOk := methodOk
|
||||
|
||||
// content-type
|
||||
contentType := httpRequest.Header.Get(HEADER_ContentType)
|
||||
inputMediaOk = []Route{}
|
||||
for _, each := range methodOk {
|
||||
if each.matchesContentType(contentType) {
|
||||
inputMediaOk = append(inputMediaOk, each)
|
||||
}
|
||||
}
|
||||
if len(inputMediaOk) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
|
||||
}
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
|
||||
// accept
|
||||
outputMediaOk := []Route{}
|
||||
accept := httpRequest.Header.Get(HEADER_Accept)
|
||||
if len(accept) == 0 {
|
||||
accept = "*/*"
|
||||
}
|
||||
for _, each := range inputMediaOk {
|
||||
if each.matchesAccept(accept) {
|
||||
outputMediaOk = append(outputMediaOk, each)
|
||||
}
|
||||
}
|
||||
if len(outputMediaOk) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
|
||||
}
|
||||
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
|
||||
}
|
||||
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
|
||||
return &outputMediaOk[0], nil
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
// n/m > n/* > */*
|
||||
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
|
||||
// TODO
|
||||
return &routes[0]
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
|
||||
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
|
||||
filtered := &sortableRouteCandidates{}
|
||||
for _, each := range dispatcher.Routes() {
|
||||
pathExpr := each.pathExpr
|
||||
matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if len(lastMatch) == 0 || lastMatch == "/" { // include the route only if the tail of the match is empty or "/"
|
||||
filtered.candidates = append(filtered.candidates,
|
||||
routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(filtered.candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
|
||||
}
|
||||
return []Route{}
|
||||
}
|
||||
sort.Sort(sort.Reverse(filtered))
|
||||
|
||||
// select other routes from candidates whose expression matches the path remainder
|
||||
matchingRoutes := []Route{filtered.candidates[0].route}
|
||||
for c := 1; c < len(filtered.candidates); c++ {
|
||||
each := filtered.candidates[c]
|
||||
if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
|
||||
matchingRoutes = append(matchingRoutes, each.route)
|
||||
}
|
||||
}
|
||||
return matchingRoutes
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
|
||||
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
|
||||
filtered := &sortableDispatcherCandidates{}
|
||||
for _, each := range dispatchers {
|
||||
matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
filtered.candidates = append(filtered.candidates,
|
||||
dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
|
||||
}
|
||||
}
|
||||
if len(filtered.candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
|
||||
}
|
||||
return nil, "", errors.New("not found")
|
||||
}
|
||||
sort.Sort(sort.Reverse(filtered))
|
||||
return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
|
||||
}
|
||||
|
||||
// Types and functions to support the sorting of Routes
|
||||
|
||||
type routeCandidate struct {
|
||||
route Route
|
||||
matchesCount int // the number of capturing groups
|
||||
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
|
||||
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not "([^/]+?)")
|
||||
}
|
||||
|
||||
func (r routeCandidate) expressionToMatch() string {
|
||||
return r.route.pathExpr.Source
|
||||
}
|
||||
|
||||
func (r routeCandidate) String() string {
|
||||
return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
|
||||
}
|
||||
|
||||
type sortableRouteCandidates struct {
|
||||
candidates []routeCandidate
|
||||
}
|
||||
|
||||
func (rcs *sortableRouteCandidates) Len() int {
|
||||
return len(rcs.candidates)
|
||||
}
|
||||
func (rcs *sortableRouteCandidates) Swap(i, j int) {
|
||||
rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
|
||||
}
|
||||
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
|
||||
ci := rcs.candidates[i]
|
||||
cj := rcs.candidates[j]
|
||||
// primary key
|
||||
if ci.literalCount < cj.literalCount {
|
||||
return true
|
||||
}
|
||||
if ci.literalCount > cj.literalCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if ci.matchesCount < cj.matchesCount {
|
||||
return true
|
||||
}
|
||||
if ci.matchesCount > cj.matchesCount {
|
||||
return false
|
||||
}
|
||||
// tertiary key
|
||||
if ci.nonDefaultCount < cj.nonDefaultCount {
|
||||
return true
|
||||
}
|
||||
if ci.nonDefaultCount > cj.nonDefaultCount {
|
||||
return false
|
||||
}
|
||||
// quaternary key ("source" is interpreted as Path)
|
||||
return ci.route.Path < cj.route.Path
|
||||
}
|
||||
|
||||
// Types and functions to support the sorting of Dispatchers
|
||||
|
||||
type dispatcherCandidate struct {
|
||||
dispatcher *WebService
|
||||
finalMatch string
|
||||
matchesCount int // the number of capturing groups
|
||||
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
|
||||
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not "([^/]+?)")
|
||||
}
|
||||
type sortableDispatcherCandidates struct {
|
||||
candidates []dispatcherCandidate
|
||||
}
|
||||
|
||||
func (dc *sortableDispatcherCandidates) Len() int {
|
||||
return len(dc.candidates)
|
||||
}
|
||||
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
|
||||
dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
|
||||
}
|
||||
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
|
||||
ci := dc.candidates[i]
|
||||
cj := dc.candidates[j]
|
||||
// primary key
|
||||
if ci.matchesCount < cj.matchesCount {
|
||||
return true
|
||||
}
|
||||
if ci.matchesCount > cj.matchesCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if ci.literalCount < cj.literalCount {
|
||||
return true
|
||||
}
|
||||
if ci.literalCount > cj.literalCount {
|
||||
return false
|
||||
}
|
||||
// tertiary key
|
||||
return ci.nonDefaultCount < cj.nonDefaultCount
|
||||
}
|
34
vendor/github.com/emicklei/go-restful/log/log.go
generated
vendored
Normal file
34
vendor/github.com/emicklei/go-restful/log/log.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
stdlog "log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
|
||||
type StdLogger interface {
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
var Logger StdLogger
|
||||
|
||||
func init() {
|
||||
// default Logger
|
||||
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
|
||||
}
|
||||
|
||||
// SetLogger sets the logger for this package
|
||||
func SetLogger(customLogger StdLogger) {
|
||||
Logger = customLogger
|
||||
}
|
||||
|
||||
// Print delegates to the Logger
|
||||
func Print(v ...interface{}) {
|
||||
Logger.Print(v...)
|
||||
}
|
||||
|
||||
// Printf delegates to the Logger
|
||||
func Printf(format string, v ...interface{}) {
|
||||
Logger.Printf(format, v...)
|
||||
}
|
32
vendor/github.com/emicklei/go-restful/logger.go
generated
vendored
Normal file
32
vendor/github.com/emicklei/go-restful/logger.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2014 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
import (
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
var trace bool = false
|
||||
var traceLogger log.StdLogger
|
||||
|
||||
func init() {
|
||||
traceLogger = log.Logger // use the package logger by default
|
||||
}
|
||||
|
||||
// TraceLogger enables detailed logging of Http request matching and filter invocation. By default no trace logger is set.
|
||||
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
|
||||
func TraceLogger(logger log.StdLogger) {
|
||||
traceLogger = logger
|
||||
EnableTracing(logger != nil)
|
||||
}
|
||||
|
||||
// SetLogger exposes the setter for the global logger on the top-level package.
|
||||
func SetLogger(customLogger log.StdLogger) {
|
||||
log.SetLogger(customLogger)
|
||||
}
|
||||
|
||||
// EnableTracing can be used to Trace logging on and off.
|
||||
func EnableTracing(enabled bool) {
|
||||
trace = enabled
|
||||
}
|
45
vendor/github.com/emicklei/go-restful/mime.go
generated
vendored
Normal file
45
vendor/github.com/emicklei/go-restful/mime.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package restful
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type mime struct {
|
||||
media string
|
||||
quality float64
|
||||
}
|
||||
|
||||
// insertMime adds a mime to a list and keeps it sorted by quality.
|
||||
func insertMime(l []mime, e mime) []mime {
|
||||
for i, each := range l {
|
||||
// if the current mime has lower quality, insert the new one before it
|
||||
if e.quality > each.quality {
|
||||
left := append([]mime{}, l[0:i]...)
|
||||
return append(append(left, e), l[i:]...)
|
||||
}
|
||||
}
|
||||
return append(l, e)
|
||||
}
|
||||
|
||||
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
|
||||
func sortedMimes(accept string) (sorted []mime) {
|
||||
for _, each := range strings.Split(accept, ",") {
|
||||
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
|
||||
if len(typeAndQuality) == 1 {
|
||||
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
|
||||
} else {
|
||||
// take factor
|
||||
parts := strings.Split(typeAndQuality[1], "=")
|
||||
if len(parts) == 2 {
|
||||
f, err := strconv.ParseFloat(parts[1], 64)
|
||||
if err != nil {
|
||||
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
|
||||
} else {
|
||||
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
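// For illustration (not part of the original file): given the Accept header
// "text/html;q=0.8, application/json", sortedMimes returns the entries ordered
// by descending quality: application/json (q=1.0) before text/html (q=0.8).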
26
vendor/github.com/emicklei/go-restful/options_filter.go
generated
vendored
Normal file
26
vendor/github.com/emicklei/go-restful/options_filter.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package restful
|
||||
|
||||
import "strings"
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
|
||||
// and provides the response with a set of allowed methods for the request URL Path.
|
||||
// As for any filter, you can also install it for a particular WebService within a Container.
|
||||
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
|
||||
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
|
||||
if "OPTIONS" != req.Request.Method {
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
|
||||
}
|
||||
|
||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
|
||||
// and provides the response with a set of allowed methods for the request URL Path.
|
||||
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
|
||||
func OPTIONSFilter() FilterFunction {
|
||||
return DefaultContainer.OPTIONSFilter
|
||||
}
|
114
vendor/github.com/emicklei/go-restful/parameter.go
generated
vendored
Normal file
114
vendor/github.com/emicklei/go-restful/parameter.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
const (
|
||||
// PathParameterKind = indicator of Request parameter type "path"
|
||||
PathParameterKind = iota
|
||||
|
||||
// QueryParameterKind = indicator of Request parameter type "query"
|
||||
QueryParameterKind
|
||||
|
||||
// BodyParameterKind = indicator of Request parameter type "body"
|
||||
BodyParameterKind
|
||||
|
||||
// HeaderParameterKind = indicator of Request parameter type "header"
|
||||
HeaderParameterKind
|
||||
|
||||
// FormParameterKind = indicator of Request parameter type "form"
|
||||
FormParameterKind
|
||||
)
|
||||
|
||||
// Parameter is for documenting the parameter used in a Http Request
|
||||
// ParameterData kinds are Path,Query and Body
|
||||
type Parameter struct {
|
||||
data *ParameterData
|
||||
}
|
||||
|
||||
// ParameterData represents the state of a Parameter.
|
||||
// It is made public to make it accessible to e.g. the Swagger package.
|
||||
type ParameterData struct {
|
||||
Name, Description, DataType, DataFormat string
|
||||
Kind int
|
||||
Required bool
|
||||
AllowableValues map[string]string
|
||||
AllowMultiple bool
|
||||
DefaultValue string
|
||||
}
|
||||
|
||||
// Data returns the state of the Parameter
|
||||
func (p *Parameter) Data() ParameterData {
|
||||
return *p.data
|
||||
}
|
||||
|
||||
// Kind returns the parameter type indicator (see const for valid values)
|
||||
func (p *Parameter) Kind() int {
|
||||
return p.data.Kind
|
||||
}
|
||||
|
||||
func (p *Parameter) bePath() *Parameter {
|
||||
p.data.Kind = PathParameterKind
|
||||
return p
|
||||
}
|
||||
func (p *Parameter) beQuery() *Parameter {
|
||||
p.data.Kind = QueryParameterKind
|
||||
return p
|
||||
}
|
||||
func (p *Parameter) beBody() *Parameter {
|
||||
p.data.Kind = BodyParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *Parameter) beHeader() *Parameter {
|
||||
p.data.Kind = HeaderParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *Parameter) beForm() *Parameter {
|
||||
p.data.Kind = FormParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
// Required sets the required field and returns the receiver
|
||||
func (p *Parameter) Required(required bool) *Parameter {
|
||||
p.data.Required = required
|
||||
return p
|
||||
}
|
||||
|
||||
// AllowMultiple sets the allowMultiple field and returns the receiver
|
||||
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
|
||||
p.data.AllowMultiple = multiple
|
||||
return p
|
||||
}
|
||||
|
||||
// AllowableValues sets the allowableValues field and returns the receiver
|
||||
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
|
||||
p.data.AllowableValues = values
|
||||
return p
|
||||
}
|
||||
|
||||
// DataType sets the dataType field and returns the receiver
|
||||
func (p *Parameter) DataType(typeName string) *Parameter {
|
||||
p.data.DataType = typeName
|
||||
return p
|
||||
}
|
||||
|
||||
// DataFormat sets the dataFormat field for Swagger UI
|
||||
func (p *Parameter) DataFormat(formatName string) *Parameter {
|
||||
p.data.DataFormat = formatName
|
||||
return p
|
||||
}
|
||||
|
||||
// DefaultValue sets the default value field and returns the receiver
|
||||
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
|
||||
p.data.DefaultValue = stringRepresentation
|
||||
return p
|
||||
}
|
||||
|
||||
// Description sets the description value field and returns the receiver
|
||||
func (p *Parameter) Description(doc string) *Parameter {
|
||||
p.data.Description = doc
|
||||
return p
|
||||
}
|
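// For illustration (not part of the original file): the fluent setters above are
// typically chained when documenting a route parameter, for example (assuming a
// *WebService ws and its QueryParameter constructor from this package):
//
//	ws.QueryParameter("count", "maximum number of results").DataType("integer").DefaultValue("10").Required(false)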
69
vendor/github.com/emicklei/go-restful/path_expression.go
generated
vendored
Normal file
69
vendor/github.com/emicklei/go-restful/path_expression.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PathExpression holds a compiled path expression (RegExp) needed to match against
|
||||
// Http request paths and to extract path parameter values.
|
||||
type pathExpression struct {
|
||||
LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
|
||||
VarCount int // the number of named parameters (enclosed by {}) in the path
|
||||
Matcher *regexp.Regexp
|
||||
Source string // Path as defined by the RouteBuilder
|
||||
tokens []string
|
||||
}
|
||||
|
||||
// NewPathExpression creates a PathExpression from the input URL path.
|
||||
// Returns an error if the path is invalid.
|
||||
func newPathExpression(path string) (*pathExpression, error) {
|
||||
expression, literalCount, varCount, tokens := templateToRegularExpression(path)
|
||||
compiled, err := regexp.Compile(expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
|
||||
func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString("^")
|
||||
//tokens = strings.Split(template, "/")
|
||||
tokens = tokenizePath(template)
|
||||
for _, each := range tokens {
|
||||
if each == "" {
|
||||
continue
|
||||
}
|
||||
buffer.WriteString("/")
|
||||
if strings.HasPrefix(each, "{") {
|
||||
// check for regular expression in variable
|
||||
colon := strings.Index(each, ":")
|
||||
if colon != -1 {
|
||||
// extract expression
|
||||
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
|
||||
if paramExpr == "*" { // special case
|
||||
buffer.WriteString("(.*)")
|
||||
} else {
|
||||
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
|
||||
}
|
||||
} else {
|
||||
// plain var
|
||||
buffer.WriteString("([^/]+?)")
|
||||
}
|
||||
varCount += 1
|
||||
} else {
|
||||
literalCount += len(each)
|
||||
encoded := each // TODO URI encode
|
||||
buffer.WriteString(regexp.QuoteMeta(encoded))
|
||||
}
|
||||
}
|
||||
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
|
||||
}
|
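// For illustration (not part of the original file): the template
// "/users/{id:[0-9]+}/files" compiles to the expression
// "^/users/([0-9]+)/files(/.*)?$" with literalCount=10 (for "users" and "files")
// and varCount=1.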
113
vendor/github.com/emicklei/go-restful/request.go
generated
vendored
Normal file
113
vendor/github.com/emicklei/go-restful/request.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var defaultRequestContentType string
|
||||
|
||||
// Request is a wrapper for a http Request that provides convenience methods
|
||||
type Request struct {
|
||||
Request *http.Request
|
||||
pathParameters map[string]string
|
||||
attributes map[string]interface{} // for storing request-scoped values
|
||||
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
|
||||
}
|
||||
|
||||
func NewRequest(httpRequest *http.Request) *Request {
|
||||
return &Request{
|
||||
Request: httpRequest,
|
||||
pathParameters: map[string]string{},
|
||||
attributes: map[string]interface{}{},
|
||||
} // empty parameters, attributes
|
||||
}
|
||||
|
||||
// If ContentType is missing or */* is given then fall back to this type, otherwise
|
||||
// a "Unable to unmarshal content of type:" response is returned.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
func DefaultRequestContentType(mime string) {
|
||||
defaultRequestContentType = mime
|
||||
}
|
||||
|
||||
// PathParameter accesses the Path parameter value by its name
|
||||
func (r *Request) PathParameter(name string) string {
|
||||
return r.pathParameters[name]
|
||||
}
|
||||
|
||||
// PathParameters accesses the Path parameter values
|
||||
func (r *Request) PathParameters() map[string]string {
|
||||
return r.pathParameters
|
||||
}
|
||||
|
||||
// QueryParameter returns the (first) Query parameter value by its name
|
||||
func (r *Request) QueryParameter(name string) string {
|
||||
return r.Request.FormValue(name)
|
||||
}
|
||||
|
||||
// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value for the given name, or an error.
|
||||
func (r *Request) BodyParameter(name string) (string, error) {
|
||||
err := r.Request.ParseForm()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return r.Request.PostFormValue(name), nil
|
||||
}
|
||||
|
||||
// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
|
||||
func (r *Request) HeaderParameter(name string) string {
|
||||
return r.Request.Header.Get(name)
|
||||
}
|
||||
|
||||
// ReadEntity checks the Accept header and reads the content into the entityPointer.
|
||||
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
|
||||
contentType := r.Request.Header.Get(HEADER_ContentType)
|
||||
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
|
||||
|
||||
// check if the request body needs decompression
|
||||
if ENCODING_GZIP == contentEncoding {
|
||||
gzipReader := currentCompressorProvider.AcquireGzipReader()
|
||||
defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
|
||||
gzipReader.Reset(r.Request.Body)
|
||||
r.Request.Body = gzipReader
|
||||
} else if ENCODING_DEFLATE == contentEncoding {
|
||||
zlibReader, err := zlib.NewReader(r.Request.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Request.Body = zlibReader
|
||||
}
|
||||
|
||||
// lookup the EntityReader, use defaultRequestContentType if needed and provided
|
||||
entityReader, ok := entityAccessRegistry.accessorAt(contentType)
|
||||
if !ok {
|
||||
if len(defaultRequestContentType) != 0 {
|
||||
entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
|
||||
}
|
||||
if !ok {
|
||||
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
|
||||
}
|
||||
}
|
||||
return entityReader.Read(r, entityPointer)
|
||||
}
|
||||
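// For illustration (not part of the original file), a handler typically reads the
// request body into a struct like this (User is a hypothetical type):
//
//	user := new(User)
//	if err := request.ReadEntity(user); err != nil {
//		response.WriteError(http.StatusBadRequest, err)
//		return
//	}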
|
||||
// SetAttribute adds or replaces the attribute with the given value.
|
||||
func (r *Request) SetAttribute(name string, value interface{}) {
|
||||
r.attributes[name] = value
|
||||
}
|
||||
|
||||
// Attribute returns the value associated to the given name. Returns nil if absent.
|
||||
func (r Request) Attribute(name string) interface{} {
|
||||
return r.attributes[name]
|
||||
}
|
||||
|
||||
// SelectedRoutePath returns the root path + route path that matched the request, e.g. /meetings/{id}/attendees
|
||||
func (r Request) SelectedRoutePath() string {
|
||||
return r.selectedRoutePath
|
||||
}
|
236
vendor/github.com/emicklei/go-restful/response.go
generated
vendored
Normal file
236
vendor/github.com/emicklei/go-restful/response.go
generated
vendored
Normal file
@ -0,0 +1,236 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
|
||||
var DefaultResponseMimeType string
|
||||
|
||||
// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
|
||||
var PrettyPrintResponses = true
|
||||
|
||||
// Response is a wrapper on the actual http ResponseWriter
|
||||
// It provides several convenience methods to prepare and write response content.
|
||||
type Response struct {
|
||||
http.ResponseWriter
|
||||
requestAccept string // mime-type what the Http Request says it wants to receive
|
||||
routeProduces []string // mime-types what the Route says it can produce
|
||||
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
|
||||
contentLength int // number of bytes written for the response body
|
||||
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
|
||||
err error // err property is kept when WriteError is called
|
||||
}
|
||||
|
||||
// NewResponse creates a new response based on a http ResponseWriter.
|
||||
func NewResponse(httpWriter http.ResponseWriter) *Response {
|
||||
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
|
||||
}
|
||||
|
||||
// DefaultResponseContentType sets a default.
|
||||
// If Accept header matching fails, fall back to this type.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultResponseContentType(restful.MIME_JSON)
|
||||
func DefaultResponseContentType(mime string) {
|
||||
DefaultResponseMimeType = mime
|
||||
}
|
||||
|
||||
// InternalServerError writes the StatusInternalServerError header.
|
||||
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
|
||||
func (r Response) InternalServerError() Response {
|
||||
r.WriteHeader(http.StatusInternalServerError)
|
||||
return r
|
||||
}
|
||||
|
||||
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
|
||||
func (r *Response) PrettyPrint(bePretty bool) {
|
||||
r.prettyPrint = bePretty
|
||||
}
|
||||
|
||||
// AddHeader is a shortcut for .Header().Add(header,value)
|
||||
func (r Response) AddHeader(header string, value string) Response {
|
||||
r.Header().Add(header, value)
|
||||
return r
|
||||
}
|
||||
|
||||
// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
|
||||
func (r *Response) SetRequestAccepts(mime string) {
|
||||
r.requestAccept = mime
|
||||
}
|
||||
|
||||
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
|
||||
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
|
||||
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
|
||||
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
|
||||
sorted := sortedMimes(r.requestAccept)
|
||||
for _, eachAccept := range sorted {
|
||||
for _, eachProduce := range r.routeProduces {
|
||||
if eachProduce == eachAccept.media {
|
||||
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
}
|
||||
if eachAccept.media == "*/*" {
|
||||
for _, each := range r.routeProduces {
|
||||
if w, ok := entityAccessRegistry.accessorAt(each); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// if requestAccept is empty
|
||||
writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
|
||||
if !ok {
|
||||
// if not registered then fallback to the defaults (if set)
|
||||
if DefaultResponseMimeType == MIME_JSON {
|
||||
return entityAccessRegistry.accessorAt(MIME_JSON)
|
||||
}
|
||||
if DefaultResponseMimeType == MIME_XML {
|
||||
return entityAccessRegistry.accessorAt(MIME_XML)
|
||||
}
|
||||
// Fallback to whatever the route says it can produce.
|
||||
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
for _, each := range r.routeProduces {
|
||||
if w, ok := entityAccessRegistry.accessorAt(each); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
if trace {
|
||||
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
|
||||
}
|
||||
}
|
||||
return writer, ok
|
||||
}
|
||||
|
||||
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
|
||||
func (r *Response) WriteEntity(value interface{}) error {
|
||||
return r.WriteHeaderAndEntity(http.StatusOK, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
|
||||
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
|
||||
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
|
||||
// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
|
||||
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
|
||||
// Current implementation ignores any q-parameters in the Accept Header.
|
||||
// Returns an error if the value could not be written on the response.
|
||||
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
|
||||
writer, ok := r.EntityWriter()
|
||||
if !ok {
|
||||
r.WriteHeader(http.StatusNotAcceptable)
|
||||
return nil
|
||||
}
|
||||
return writer.Write(r, status, value)
|
||||
}
|
||||
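// For illustration (not part of the original file), a handler typically responds with
//
//	response.WriteHeaderAndEntity(http.StatusCreated, createdUser)
//
// where createdUser is any value (hypothetical here) that the negotiated EntityWriter can marshal.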
|
||||
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
|
||||
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteAsXml(value interface{}) error {
|
||||
return writeXML(r, http.StatusOK, MIME_XML, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
|
||||
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
|
||||
return writeXML(r, status, MIME_XML, value)
|
||||
}
|
||||
|
||||
// WriteAsJson is a convenience method for writing a value in json.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteAsJson(value interface{}) error {
|
||||
return writeJSON(r, http.StatusOK, MIME_JSON, value)
|
||||
}
|
||||
|
||||
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteJson(value interface{}, contentType string) error {
|
||||
return writeJSON(r, http.StatusOK, contentType, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
|
||||
return writeJSON(r, status, contentType, value)
|
||||
}
|
||||
|
||||
// WriteError writes the http status and the error string on the response.
|
||||
func (r *Response) WriteError(httpStatus int, err error) error {
|
||||
r.err = err
|
||||
return r.WriteErrorString(httpStatus, err.Error())
|
||||
}
|
||||
|
||||
// WriteServiceError is a convenience method for responding with a status and a ServiceError
|
||||
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
|
||||
r.err = err
|
||||
return r.WriteHeaderAndEntity(httpStatus, err)
|
||||
}
|
||||
|
||||
// WriteErrorString is a convenience method for an error status with the actual error
|
||||
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
|
||||
if r.err == nil {
|
||||
// if not called from WriteError
|
||||
r.err = errors.New(errorReason)
|
||||
}
|
||||
r.WriteHeader(httpStatus)
|
||||
if _, err := r.Write([]byte(errorReason)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush implements http.Flusher interface, which sends any buffered data to the client.
|
||||
func (r *Response) Flush() {
|
||||
if f, ok := r.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
} else if trace {
|
||||
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHeader is overridden to remember the Status Code that has been written.
|
||||
// Changes to the Header of the response have no effect after this.
|
||||
func (r *Response) WriteHeader(httpStatus int) {
|
||||
r.statusCode = httpStatus
|
||||
r.ResponseWriter.WriteHeader(httpStatus)
|
||||
}
|
||||
|
||||
// StatusCode returns the code that has been written using WriteHeader.
|
||||
func (r Response) StatusCode() int {
|
||||
if 0 == r.statusCode {
|
||||
// no status code has been written yet; assume OK
|
||||
return http.StatusOK
|
||||
}
|
||||
return r.statusCode
|
||||
}
|
||||
|
||||
// Write writes the data to the connection as part of an HTTP reply.
|
||||
// Write is part of http.ResponseWriter interface.
|
||||
func (r *Response) Write(bytes []byte) (int, error) {
|
||||
written, err := r.ResponseWriter.Write(bytes)
|
||||
r.contentLength += written
|
||||
return written, err
|
||||
}
|
||||
|
||||
// ContentLength returns the number of bytes written for the response content.
|
||||
// Note that this value is only correct if all data is written through the Response using its Write* methods.
|
||||
// Data written directly using the underlying http.ResponseWriter is not accounted for.
|
||||
func (r Response) ContentLength() int {
|
||||
return r.contentLength
|
||||
}
|
||||
|
||||
// CloseNotify is part of http.CloseNotifier interface
|
||||
func (r Response) CloseNotify() <-chan bool {
|
||||
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Error returns the err created by WriteError
|
||||
func (r Response) Error() error {
|
||||
return r.err
|
||||
}
|
186
vendor/github.com/emicklei/go-restful/route.go
generated
vendored
Normal file
186
vendor/github.com/emicklei/go-restful/route.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// RouteFunction declares the signature of a function that can be bound to a Route.
|
||||
type RouteFunction func(*Request, *Response)
|
||||
|
||||
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
|
||||
type Route struct {
|
||||
Method string
|
||||
Produces []string
|
||||
Consumes []string
|
||||
Path string // webservice root path + described path
|
||||
Function RouteFunction
|
||||
Filters []FilterFunction
|
||||
|
||||
// cached values for dispatching
|
||||
relativePath string
|
||||
pathParts []string
|
||||
pathExpr *pathExpression // cached compilation of relativePath as RegExp
|
||||
|
||||
// documentation
|
||||
Doc string
|
||||
Notes string
|
||||
Operation string
|
||||
ParameterDocs []*Parameter
|
||||
ResponseErrors map[int]ResponseError
|
||||
ReadSample, WriteSample interface{} // structs that model an example request or response payload
|
||||
|
||||
// Extra information used to store custom information about the route.
|
||||
Metadata map[string]interface{}
|
||||
}
|
||||
|
||||
// Initialize for Route
|
||||
func (r *Route) postBuild() {
|
||||
r.pathParts = tokenizePath(r.Path)
|
||||
}
|
||||
|
||||
// Create Request and Response from their http versions
|
||||
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
|
||||
params := r.extractParameters(httpRequest.URL.Path)
|
||||
wrappedRequest := NewRequest(httpRequest)
|
||||
wrappedRequest.pathParameters = params
|
||||
wrappedRequest.selectedRoutePath = r.Path
|
||||
wrappedResponse := NewResponse(httpWriter)
|
||||
wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
|
||||
wrappedResponse.routeProduces = r.Produces
|
||||
return wrappedRequest, wrappedResponse
|
||||
}
|
||||
|
||||
// dispatchWithFilters calls the function after passing through its own filters
|
||||
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
|
||||
if len(r.Filters) > 0 {
|
||||
chain := FilterChain{Filters: r.Filters, Target: r.Function}
|
||||
chain.ProcessFilter(wrappedRequest, wrappedResponse)
|
||||
} else {
|
||||
// unfiltered
|
||||
r.Function(wrappedRequest, wrappedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// Return whether the mimeType matches what this Route can produce.
|
||||
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
|
||||
parts := strings.Split(mimeTypesWithQuality, ",")
|
||||
for _, each := range parts {
|
||||
var withoutQuality string
|
||||
if strings.Contains(each, ";") {
|
||||
withoutQuality = strings.Split(each, ";")[0]
|
||||
} else {
|
||||
withoutQuality = each
|
||||
}
|
||||
// trim before compare
|
||||
withoutQuality = strings.Trim(withoutQuality, " ")
|
||||
if withoutQuality == "*/*" {
|
||||
return true
|
||||
}
|
||||
for _, producibleType := range r.Produces {
|
||||
if producibleType == "*/*" || producibleType == withoutQuality {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
|
||||
func (r Route) matchesContentType(mimeTypes string) bool {
|
||||
|
||||
if len(r.Consumes) == 0 {
|
||||
// did not specify what it can consume ; any media type (“*/*”) is assumed
|
||||
return true
|
||||
}
|
||||
|
||||
if len(mimeTypes) == 0 {
|
||||
// idempotent methods with (most-likely or garanteed) empty content match missing Content-Type
|
||||
m := r.Method
|
||||
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
|
||||
return true
|
||||
}
|
||||
// proceed with default
|
||||
mimeTypes = MIME_OCTET
|
||||
}
|
||||
|
||||
parts := strings.Split(mimeTypes, ",")
|
||||
for _, each := range parts {
|
||||
var contentType string
|
||||
if strings.Contains(each, ";") {
|
||||
contentType = strings.Split(each, ";")[0]
|
||||
} else {
|
||||
contentType = each
|
||||
}
|
||||
// trim before compare
|
||||
contentType = strings.Trim(contentType, " ")
|
||||
for _, consumeableType := range r.Consumes {
|
||||
if consumeableType == "*/*" || consumeableType == contentType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract the parameters from the request url path
|
||||
func (r Route) extractParameters(urlPath string) map[string]string {
|
||||
urlParts := tokenizePath(urlPath)
|
||||
pathParameters := map[string]string{}
|
||||
for i, key := range r.pathParts {
|
||||
var value string
|
||||
if i >= len(urlParts) {
|
||||
value = ""
|
||||
} else {
|
||||
value = urlParts[i]
|
||||
}
|
||||
if strings.HasPrefix(key, "{") { // path-parameter
|
||||
if colon := strings.Index(key, ":"); colon != -1 {
|
||||
// extract by regex
|
||||
regPart := key[colon+1 : len(key)-1]
|
||||
keyPart := key[1:colon]
|
||||
if regPart == "*" {
|
||||
pathParameters[keyPart] = untokenizePath(i, urlParts)
|
||||
break
|
||||
} else {
|
||||
pathParameters[keyPart] = value
|
||||
}
|
||||
} else {
|
||||
// without enclosing {}
|
||||
pathParameters[key[1:len(key)-1]] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
return pathParameters
|
||||
}
|
||||
|
||||
// Untokenize back into an URL path using the slash separator
|
||||
func untokenizePath(offset int, parts []string) string {
|
||||
var buffer bytes.Buffer
|
||||
for p := offset; p < len(parts); p++ {
|
||||
buffer.WriteString(parts[p])
|
||||
// do not end
|
||||
if p < len(parts)-1 {
|
||||
buffer.WriteString("/")
|
||||
}
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// Tokenize an URL path using the slash separator ; the result does not have empty tokens
|
||||
func tokenizePath(path string) []string {
|
||||
if "/" == path {
|
||||
return []string{}
|
||||
}
|
||||
return strings.Split(strings.Trim(path, "/"), "/")
|
||||
}
|
||||
|
||||
// for debugging
|
||||
func (r Route) String() string {
|
||||
return r.Method + " " + r.Path
|
||||
}
|
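For orientation, the parameter extraction in route.go works by splitting both the route template and the request path on "/" and pairing the tokens positionally. A minimal standalone sketch of the same idea (the package's own helpers are unexported, so the names below are illustrative only, not the vendored API):

```go
package main

import (
	"fmt"
	"strings"
)

// tokenize mirrors the idea of tokenizePath above: trim surrounding slashes and split on "/".
func tokenize(path string) []string {
	if path == "/" {
		return []string{}
	}
	return strings.Split(strings.Trim(path, "/"), "/")
}

func main() {
	route := tokenize("/users/{id}") // ["users", "{id}"]
	request := tokenize("/users/42") // ["users", "42"]

	params := map[string]string{}
	for i, key := range route {
		if strings.HasPrefix(key, "{") && i < len(request) {
			params[key[1:len(key)-1]] = request[i] // "{id}" -> params["id"] = "42"
		}
	}
	fmt.Println(params) // map[id:42]
}
```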
293 vendor/github.com/emicklei/go-restful/route_builder.go generated vendored Normal file
@@ -0,0 +1,293 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// RouteBuilder is a helper to construct Routes.
|
||||
type RouteBuilder struct {
|
||||
rootPath string
|
||||
currentPath string
|
||||
produces []string
|
||||
consumes []string
|
||||
httpMethod string // required
|
||||
function RouteFunction // required
|
||||
filters []FilterFunction
|
||||
|
||||
typeNameHandleFunc TypeNameHandleFunction // required
|
||||
|
||||
// documentation
|
||||
doc string
|
||||
notes string
|
||||
operation string
|
||||
readSample, writeSample interface{}
|
||||
parameters []*Parameter
|
||||
errorMap map[int]ResponseError
|
||||
metadata map[string]interface{}
|
||||
}
|
||||
|
||||
// Do evaluates each argument with the RouteBuilder itself.
|
||||
// This allows you to follow DRY principles without breaking the fluent programming style.
|
||||
// Example:
|
||||
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
|
||||
//
|
||||
// func Returns500(b *RouteBuilder) {
|
||||
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
|
||||
// }
|
||||
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
|
||||
for _, each := range oneArgBlocks {
|
||||
each(b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// To bind the route to a function.
|
||||
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
|
||||
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
|
||||
b.function = function
|
||||
return b
|
||||
}
|
||||
|
||||
// Method specifies what HTTP method to match. Required.
|
||||
func (b *RouteBuilder) Method(method string) *RouteBuilder {
|
||||
b.httpMethod = method
|
||||
return b
|
||||
}
|
||||
|
||||
// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
|
||||
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
|
||||
b.produces = mimeTypes
|
||||
return b
|
||||
}
|
||||
|
||||
// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these
|
||||
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
|
||||
b.consumes = mimeTypes
|
||||
return b
|
||||
}
|
||||
|
||||
// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
|
||||
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
|
||||
b.currentPath = subPath
|
||||
return b
|
||||
}
|
||||
|
||||
// Doc tells what this route is all about. Optional.
|
||||
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
|
||||
b.doc = documentation
|
||||
return b
|
||||
}
|
||||
|
||||
// A verbose explanation of the operation behavior. Optional.
|
||||
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
|
||||
b.notes = notes
|
||||
return b
|
||||
}
|
||||
|
||||
// Reads tells what resource type will be read from the request payload. Optional.
|
||||
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
|
||||
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
|
||||
fn := b.typeNameHandleFunc
|
||||
if fn == nil {
|
||||
fn = reflectTypeName
|
||||
}
|
||||
typeAsName := fn(sample)
|
||||
|
||||
b.readSample = sample
|
||||
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
|
||||
bodyParameter.beBody()
|
||||
bodyParameter.Required(true)
|
||||
bodyParameter.DataType(typeAsName)
|
||||
b.Param(bodyParameter)
|
||||
return b
|
||||
}
|
||||
|
||||
// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
|
||||
// Use this to modify or extend information for the Parameter (through its Data()).
|
||||
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
|
||||
for _, each := range b.parameters {
|
||||
if each.Data().Name == name {
|
||||
return each
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Writes tells what resource type will be written as the response payload. Optional.
|
||||
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
|
||||
b.writeSample = sample
|
||||
return b
|
||||
}
|
||||
|
||||
// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
|
||||
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
|
||||
if b.parameters == nil {
|
||||
b.parameters = []*Parameter{}
|
||||
}
|
||||
b.parameters = append(b.parameters, parameter)
|
||||
return b
|
||||
}
|
||||
|
||||
// Operation allows you to document what the actual method/function call is of the Route.
|
||||
// Unless called, the operation name is derived from the RouteFunction set using To(..).
|
||||
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
|
||||
b.operation = name
|
||||
return b
|
||||
}
|
||||
|
||||
// ReturnsError is deprecated, use Returns instead.
|
||||
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
|
||||
log.Print("ReturnsError is deprecated, use Returns instead.")
|
||||
return b.Returns(code, message, model)
|
||||
}
|
||||
|
||||
// Returns allows you to document what responses (errors or regular) can be expected.
|
||||
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
|
||||
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
|
||||
err := ResponseError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
Model: model,
|
||||
IsDefault: false,
|
||||
}
|
||||
// lazy init because there is no NewRouteBuilder (yet)
|
||||
if b.errorMap == nil {
|
||||
b.errorMap = map[int]ResponseError{}
|
||||
}
|
||||
b.errorMap[code] = err
|
||||
return b
|
||||
}
|
||||
|
||||
// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
|
||||
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
|
||||
b.Returns(0, message, model)
|
||||
// Modify the ResponseError just added/updated
|
||||
re := b.errorMap[0]
|
||||
// errorMap is initialized
|
||||
b.errorMap[0] = ResponseError{
|
||||
Code: re.Code,
|
||||
Message: re.Message,
|
||||
Model: re.Model,
|
||||
IsDefault: true,
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Metadata adds or updates a key=value pair to the metadata map.
|
||||
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
|
||||
if b.metadata == nil {
|
||||
b.metadata = map[string]interface{}{}
|
||||
}
|
||||
b.metadata[key] = value
|
||||
return b
|
||||
}
|
||||
|
||||
// ResponseError represents a response; not necessarily an error.
|
||||
type ResponseError struct {
|
||||
Code int
|
||||
Message string
|
||||
Model interface{}
|
||||
IsDefault bool
|
||||
}
|
||||
|
||||
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
|
||||
b.rootPath = path
|
||||
return b
|
||||
}
|
||||
|
||||
// Filter appends a FilterFunction to the end of filters for this Route to build.
|
||||
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
|
||||
b.filters = append(b.filters, filter)
|
||||
return b
|
||||
}
|
||||
|
||||
// If no specific Route path then set to rootPath
|
||||
// If no specific Produces then set to rootProduces
|
||||
// If no specific Consumes then set to rootConsumes
|
||||
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
|
||||
if len(b.produces) == 0 {
|
||||
b.produces = rootProduces
|
||||
}
|
||||
if len(b.consumes) == 0 {
|
||||
b.consumes = rootConsumes
|
||||
}
|
||||
}
|
||||
|
||||
// typeNameHandler sets the function that will convert types to strings in the parameter
|
||||
// and model definitions.
|
||||
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
|
||||
b.typeNameHandleFunc = handler
|
||||
return b
|
||||
}
|
||||
|
||||
// Build creates a new Route using the specification details collected by the RouteBuilder
|
||||
func (b *RouteBuilder) Build() Route {
|
||||
pathExpr, err := newPathExpression(b.currentPath)
|
||||
if err != nil {
|
||||
log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if b.function == nil {
|
||||
log.Printf("[restful] No function specified for route:" + b.currentPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
operationName := b.operation
|
||||
if len(operationName) == 0 && b.function != nil {
|
||||
// extract from definition
|
||||
operationName = nameOfFunction(b.function)
|
||||
}
|
||||
route := Route{
|
||||
Method: b.httpMethod,
|
||||
Path: concatPath(b.rootPath, b.currentPath),
|
||||
Produces: b.produces,
|
||||
Consumes: b.consumes,
|
||||
Function: b.function,
|
||||
Filters: b.filters,
|
||||
relativePath: b.currentPath,
|
||||
pathExpr: pathExpr,
|
||||
Doc: b.doc,
|
||||
Notes: b.notes,
|
||||
Operation: operationName,
|
||||
ParameterDocs: b.parameters,
|
||||
ResponseErrors: b.errorMap,
|
||||
ReadSample: b.readSample,
|
||||
WriteSample: b.writeSample,
|
||||
Metadata: b.metadata}
|
||||
route.postBuild()
|
||||
return route
|
||||
}
|
||||
|
||||
func concatPath(path1, path2 string) string {
|
||||
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
|
||||
}
|
||||
|
||||
var anonymousFuncCount int32
|
||||
|
||||
// nameOfFunction returns the short name of the function f for documentation.
|
||||
// It uses a runtime feature for debugging ; its value may change for later Go versions.
|
||||
func nameOfFunction(f interface{}) string {
|
||||
fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
|
||||
tokenized := strings.Split(fun.Name(), ".")
|
||||
last := tokenized[len(tokenized)-1]
|
||||
last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
|
||||
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
|
||||
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
|
||||
last = strings.TrimSuffix(last, "-fm") // Go 1.5
|
||||
if last == "func1" { // this could mean conflicts in API docs
|
||||
val := atomic.AddInt32(&anonymousFuncCount, 1)
|
||||
last = "func" + fmt.Sprintf("%d", val)
|
||||
atomic.StoreInt32(&anonymousFuncCount, val)
|
||||
}
|
||||
return last
|
||||
}
|
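The RouteBuilder above is normally obtained from a WebService and driven fluently. A hedged fragment of typical use, assuming a *restful.WebService named ws plus a hypothetical findUser handler and User struct (neither is part of this vendored file):

```go
// Fragment: ws, findUser and User are assumed to exist in the surrounding program.
ws.Route(ws.GET("/{id}").To(findUser).
	Doc("look up a user by id").
	Param(ws.PathParameter("id", "identifier of the user").DataType("string")).
	Writes(User{}).
	Returns(200, "OK", User{}).
	Returns(404, "Not Found", nil))
```

The documentation calls (Doc, Param, Returns, Writes) only affect generated API documentation; Method/Path/To are what Build() actually needs.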
18 vendor/github.com/emicklei/go-restful/router.go generated vendored Normal file
@@ -0,0 +1,18 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "net/http"

// A RouteSelector finds the best matching Route given the input HTTP Request
type RouteSelector interface {

	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
	// It returns a selected Route and its containing WebService or an error indicating
	// a problem.
	SelectRoute(
		webServices []*WebService,
		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}
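Concrete RouteSelector implementations ship elsewhere in this package (for example the curly-brace router); as a hedged sketch, an alternative selector is plugged into a container roughly like this:

```go
// Assumption: CurlyRouter and Container.Router exist in this vendored go-restful version.
restful.DefaultContainer.Router(restful.CurlyRouter{})
```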
23 vendor/github.com/emicklei/go-restful/service_error.go generated vendored Normal file
@@ -0,0 +1,23 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "fmt"

// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
type ServiceError struct {
	Code    int
	Message string
}

// NewError returns a ServiceError using the code and reason
func NewError(code int, message string) ServiceError {
	return ServiceError{Code: code, Message: message}
}

// Error returns a text representation of the service error
func (s ServiceError) Error() string {
	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
}
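A ServiceError is typically written back from a route function. A hedged sketch, assuming the WriteServiceError helper on the package's Response type and the usual RouteFunction signature (handler name and behaviour are hypothetical):

```go
func findUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("id")
	if id == "" {
		// WriteServiceError sets the HTTP status and marshals the ServiceError as the body.
		resp.WriteServiceError(http.StatusBadRequest, restful.NewError(http.StatusBadRequest, "id is required"))
		return
	}
	// Success path: look up the user and write it as the response entity.
	resp.WriteEntity(map[string]string{"id": id})
}
```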
290 vendor/github.com/emicklei/go-restful/web_service.go generated vendored Normal file
@@ -0,0 +1,290 @@
|
||||
package restful
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
|
||||
type WebService struct {
|
||||
rootPath string
|
||||
pathExpr *pathExpression // cached compilation of rootPath as RegExp
|
||||
routes []Route
|
||||
produces []string
|
||||
consumes []string
|
||||
pathParameters []*Parameter
|
||||
filters []FilterFunction
|
||||
documentation string
|
||||
apiVersion string
|
||||
|
||||
typeNameHandleFunc TypeNameHandleFunction
|
||||
|
||||
dynamicRoutes bool
|
||||
|
||||
// protects 'routes' if dynamic routes are enabled
|
||||
routesLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (w *WebService) SetDynamicRoutes(enable bool) {
|
||||
w.dynamicRoutes = enable
|
||||
}
|
||||
|
||||
// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
|
||||
// into the restful documentation for the service.
|
||||
type TypeNameHandleFunction func(sample interface{}) string
|
||||
|
||||
// TypeNameHandler sets the function that will convert types to strings in the parameter
|
||||
// and model definitions. If not set, the web service will invoke
|
||||
// reflect.TypeOf(object).String().
|
||||
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
|
||||
w.typeNameHandleFunc = handler
|
||||
return w
|
||||
}
|
||||
|
||||
// reflectTypeName is the default TypeNameHandleFunction and for a given object
|
||||
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
|
||||
// the reflection API.
|
||||
func reflectTypeName(sample interface{}) string {
|
||||
return reflect.TypeOf(sample).String()
|
||||
}
|
||||
|
||||
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
|
||||
func (w *WebService) compilePathExpression() {
|
||||
compiled, err := newPathExpression(w.rootPath)
|
||||
if err != nil {
|
||||
log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
w.pathExpr = compiled
|
||||
}
|
||||
|
||||
// ApiVersion sets the API version for documentation purposes.
|
||||
func (w *WebService) ApiVersion(apiVersion string) *WebService {
|
||||
w.apiVersion = apiVersion
|
||||
return w
|
||||
}
|
||||
|
||||
// Version returns the API version for documentation purposes.
|
||||
func (w *WebService) Version() string { return w.apiVersion }
|
||||
|
||||
// Path specifies the root URL template path of the WebService.
|
||||
// All Routes will be relative to this path.
|
||||
func (w *WebService) Path(root string) *WebService {
|
||||
w.rootPath = root
|
||||
if len(w.rootPath) == 0 {
|
||||
w.rootPath = "/"
|
||||
}
|
||||
w.compilePathExpression()
|
||||
return w
|
||||
}
|
||||
|
||||
// Param adds a PathParameter to document parameters used in the root path.
|
||||
func (w *WebService) Param(parameter *Parameter) *WebService {
|
||||
if w.pathParameters == nil {
|
||||
w.pathParameters = []*Parameter{}
|
||||
}
|
||||
w.pathParameters = append(w.pathParameters, parameter)
|
||||
return w
|
||||
}
|
||||
|
||||
// PathParameter creates a new Parameter of kind Path for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func (w *WebService) PathParameter(name, description string) *Parameter {
|
||||
return PathParameter(name, description)
|
||||
}
|
||||
|
||||
// PathParameter creates a new Parameter of kind Path for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func PathParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
|
||||
p.bePath()
|
||||
return p
|
||||
}
|
||||
|
||||
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func (w *WebService) QueryParameter(name, description string) *Parameter {
|
||||
return QueryParameter(name, description)
|
||||
}
|
||||
|
||||
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func QueryParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beQuery()
|
||||
return p
|
||||
}
|
||||
|
||||
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
|
||||
// It is initialized as required without a DataType.
|
||||
func (w *WebService) BodyParameter(name, description string) *Parameter {
|
||||
return BodyParameter(name, description)
|
||||
}
|
||||
|
||||
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
|
||||
// It is initialized as required without a DataType.
|
||||
func BodyParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
|
||||
p.beBody()
|
||||
return p
|
||||
}
|
||||
|
||||
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func (w *WebService) HeaderParameter(name, description string) *Parameter {
|
||||
return HeaderParameter(name, description)
|
||||
}
|
||||
|
||||
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func HeaderParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beHeader()
|
||||
return p
|
||||
}
|
||||
|
||||
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func (w *WebService) FormParameter(name, description string) *Parameter {
|
||||
return FormParameter(name, description)
|
||||
}
|
||||
|
||||
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func FormParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beForm()
|
||||
return p
|
||||
}
|
||||
|
||||
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
|
||||
func (w *WebService) Route(builder *RouteBuilder) *WebService {
|
||||
w.routesLock.Lock()
|
||||
defer w.routesLock.Unlock()
|
||||
builder.copyDefaults(w.produces, w.consumes)
|
||||
w.routes = append(w.routes, builder.Build())
|
||||
return w
|
||||
}
|
||||
|
||||
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
|
||||
func (w *WebService) RemoveRoute(path, method string) error {
|
||||
if !w.dynamicRoutes {
|
||||
return errors.New("dynamic routes are not enabled.")
|
||||
}
|
||||
w.routesLock.Lock()
|
||||
defer w.routesLock.Unlock()
|
||||
newRoutes := make([]Route, (len(w.routes) - 1))
|
||||
current := 0
|
||||
for ix := range w.routes {
|
||||
if w.routes[ix].Method == method && w.routes[ix].Path == path {
|
||||
continue
|
||||
}
|
||||
newRoutes[current] = w.routes[ix]
|
||||
current = current + 1
|
||||
}
|
||||
w.routes = newRoutes
|
||||
return nil
|
||||
}
|
||||
|
||||
// Method creates a new RouteBuilder and initialize its http method
|
||||
func (w *WebService) Method(httpMethod string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
|
||||
}
|
||||
|
||||
// Produces specifies that this WebService can produce one or more MIME types.
|
||||
// Http requests must have one of these values set for the Accept header.
|
||||
func (w *WebService) Produces(contentTypes ...string) *WebService {
|
||||
w.produces = contentTypes
|
||||
return w
|
||||
}
|
||||
|
||||
// Consumes specifies that this WebService can consume one or more MIME types.
|
||||
// Http requests must have one of these values set for the Content-Type header.
|
||||
func (w *WebService) Consumes(accepts ...string) *WebService {
|
||||
w.consumes = accepts
|
||||
return w
|
||||
}
|
||||
|
||||
// Routes returns the Routes associated with this WebService
|
||||
func (w *WebService) Routes() []Route {
|
||||
if !w.dynamicRoutes {
|
||||
return w.routes
|
||||
}
|
||||
// Make a copy of the array to prevent concurrency problems
|
||||
w.routesLock.RLock()
|
||||
defer w.routesLock.RUnlock()
|
||||
result := make([]Route, len(w.routes))
|
||||
for ix := range w.routes {
|
||||
result[ix] = w.routes[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// RootPath returns the RootPath associated with this WebService. Default "/"
|
||||
func (w *WebService) RootPath() string {
|
||||
return w.rootPath
|
||||
}
|
||||
|
||||
// PathParameters return the path parameter names for (shared amoung its Routes)
|
||||
func (w *WebService) PathParameters() []*Parameter {
|
||||
return w.pathParameters
|
||||
}
|
||||
|
||||
// Filter adds a filter function to the chain of filters applicable to all its Routes
|
||||
func (w *WebService) Filter(filter FilterFunction) *WebService {
|
||||
w.filters = append(w.filters, filter)
|
||||
return w
|
||||
}
|
||||
|
||||
// Doc is used to set the documentation of this service.
|
||||
func (w *WebService) Doc(plainText string) *WebService {
|
||||
w.documentation = plainText
|
||||
return w
|
||||
}
|
||||
|
||||
// Documentation returns it.
|
||||
func (w *WebService) Documentation() string {
|
||||
return w.documentation
|
||||
}
|
||||
|
||||
/*
|
||||
Convenience methods
|
||||
*/
|
||||
|
||||
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
|
||||
func (w *WebService) HEAD(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
|
||||
}
|
||||
|
||||
// GET is a shortcut for .Method("GET").Path(subPath)
|
||||
func (w *WebService) GET(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
|
||||
}
|
||||
|
||||
// POST is a shortcut for .Method("POST").Path(subPath)
|
||||
func (w *WebService) POST(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
|
||||
}
|
||||
|
||||
// PUT is a shortcut for .Method("PUT").Path(subPath)
|
||||
func (w *WebService) PUT(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
|
||||
}
|
||||
|
||||
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
|
||||
func (w *WebService) PATCH(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
|
||||
}
|
||||
|
||||
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
|
||||
func (w *WebService) DELETE(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
|
||||
}
|
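Putting the pieces together, a WebService is usually configured once and then registered with a container. A minimal hedged sketch, reusing the hypothetical findUser handler from the earlier fragment and assuming a small User struct (none of this is part of the vendored file):

```go
type User struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func newUserService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// A service-level filter applies to every Route added below.
	ws.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		log.Printf("%s %s", req.Request.Method, req.Request.URL)
		chain.ProcessFilter(req, resp)
	})

	ws.Route(ws.GET("/{id}").To(findUser))
	return ws
}
```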
39 vendor/github.com/emicklei/go-restful/web_service_container.go generated vendored Normal file
@@ -0,0 +1,39 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"net/http"
)

// DefaultContainer is a restful.Container that uses http.DefaultServeMux
var DefaultContainer *Container

func init() {
	DefaultContainer = NewContainer()
	DefaultContainer.ServeMux = http.DefaultServeMux
}

// If set the true then panics will not be caught to return HTTP 500.
// In that case, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications.
// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
var DoNotRecover = false

// Add registers a new WebService add it to the DefaultContainer.
func Add(service *WebService) {
	DefaultContainer.Add(service)
}

// Filter appends a container FilterFunction from the DefaultContainer.
// These are called before dispatching a http.Request to a WebService.
func Filter(filter FilterFunction) {
	DefaultContainer.Filter(filter)
}

// RegisteredWebServices returns the collections of WebServices from the DefaultContainer
func RegisteredWebServices() []*WebService {
	return DefaultContainer.RegisteredWebServices()
}
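With the default container wired to http.DefaultServeMux as above, serving the service from the previous sketch reduces to registering it and starting the standard library server (continuing the same hypothetical example; "log" and "net/http" imports assumed):

```go
func main() {
	restful.Add(newUserService())                // registers on restful.DefaultContainer
	log.Fatal(http.ListenAndServe(":8080", nil)) // nil handler means http.DefaultServeMux
}
```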
1 vendor/github.com/go-openapi/jsonpointer/.drone.sec generated vendored Normal file
@@ -0,0 +1 @@
|
||||
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.pDqezepze0YqRx4u6M8GFaWtnVR-utTWZic-GX-RvMATAoYpG4H2sc9tlnGNCxa44dbRY0vY10qfBU7Sno8vkp21fsK42ofGLfen_suum_0ilm0sFS0X-kAwk7TIq5L5lPPKiChPMUiGp5oJW-g5MqMFX1jNiI-4fP-vSM3B3-eyZtJD_O517TgfIRLnblCzqwIkyRmAfPNopi-Fe8Y31TmO2Vd0nFc1Aqro_VaJSACzEVxOHTNpjETcMjlYzwgMXLeiAfLV-5hM0f6DXgHMlLSuMkB_Ndnw25dkB7hreGk4x0tHQ3X9mUfTgLq1hIDoyeeKDIM83Tqw4LBRph20BQ.qd_pNuyi23B0PlWz.JtpO7kqOm0SWOGzWDalkWheHuNd-eDpVbqI9WPAEFDOIBvz7TbsYMBlIYVWEGWbat4mkx_ejxnMn1L1l996NJnyP7eY-QE82cfPJbjx94d0Ob70KZ4DCm_UxcY2t-OKFiPJqxW7MA5jKyDuGD16bdxpjLEoe_cMSEr8FNu-MVG6wcchPcyYyRkqTQSl4mb09KikkAzHjwjo-DcO0f8ps4Uzsoc0aqAAWdE-ocG0YqierLoemjusYMiLH-eLF6MvaLRvHSte-cLzPuYCeZURnBDgxu3i3UApgddnX7g1c7tdGGBGvgCl-tEEDW58Vxgdjksim2S7y3lfoJ8FFzSWeRH2y7Kq04hgew3b2J_RiDB9ejzIopzG8ZGjJa3EO1-i9ORTl12nXK1RdlLGqu604ENaeVOPCIHL-0C8e6_wHdUGHydLZImSxKYSrNvy8resP1D_9t4B-3q2mkS9mhnMONrXbPDVw5QY5mvXlWs0Db99ARwzsl-Qlu0A_tsZwMjWT2I1QMvWPyTRScmMm0FJSv9zStjzxWa_q2GL7Naz1fI4Dd6ZgNJWYYq-mHN5chEeBdIcwb_zMPHczMQXXNL5nmfRGM1aPffkToFWCDpIlI8IXec83ZC6_POxZegS6n9Drrvc.6Nz8EXxs1lWX3ASaCeNElA
|
32 vendor/github.com/go-openapi/jsonpointer/.drone.yml generated vendored Normal file
@@ -0,0 +1,32 @@
clone:
  path: github.com/go-openapi/jsonpointer

matrix:
  GO_VERSION:
    - "1.6"

build:
  integration:
    image: golang:$$GO_VERSION
    pull: true
    commands:
      - go get -u github.com/stretchr/testify/assert
      - go get -u github.com/go-openapi/swag
      - go test -race
      - go test -v -cover -coverprofile=coverage.out -covermode=count ./...

notify:
  slack:
    channel: bots
    webhook_url: $$SLACK_URL
    username: drone

publish:
  coverage:
    server: https://coverage.vmware.run
    token: $$GITHUB_TOKEN
    # threshold: 70
    # must_increase: true
    when:
      matrix:
        GO_VERSION: "1.6"
1 vendor/github.com/go-openapi/jsonpointer/.gitignore generated vendored Normal file
@@ -0,0 +1 @@
secrets.yml
13 vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml generated vendored Normal file
@@ -0,0 +1,13 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
  members:
  - casualjim
  - chancez
  - frapposelli
  - vburenin
  - pytlesk4
  name: pullapprove
  required: 1
74 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md generated vendored Normal file
@@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202 vendor/github.com/go-openapi/jsonpointer/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
15 vendor/github.com/go-openapi/jsonpointer/README.md generated vendored Normal file
@@ -0,0 +1,15 @@
# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)

[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language

## Status
Completed YES

Tested YES

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
238 vendor/github.com/go-openapi/jsonpointer/pointer.go generated vendored Normal file
@@ -0,0 +1,238 @@
|
||||
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author sigu-399
|
||||
// author-github https://github.com/sigu-399
|
||||
// author-mail sigu.399@gmail.com
|
||||
//
|
||||
// repository-name jsonpointer
|
||||
// repository-desc An implementation of JSON Pointer - Go language
|
||||
//
|
||||
// description Main and unique file.
|
||||
//
|
||||
// created 25-02-2013
|
||||
|
||||
package jsonpointer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
const (
|
||||
emptyPointer = ``
|
||||
pointerSeparator = `/`
|
||||
|
||||
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
|
||||
)
|
||||
|
||||
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
|
||||
|
||||
// JSONPointable is an interface for structs to implement when they need to customize the
|
||||
// json pointer process
|
||||
type JSONPointable interface {
|
||||
JSONLookup(string) (interface{}, error)
|
||||
}
|
||||
|
||||
type implStruct struct {
|
||||
mode string // "SET" or "GET"
|
||||
|
||||
inDocument interface{}
|
||||
|
||||
setInValue interface{}
|
||||
|
||||
getOutNode interface{}
|
||||
getOutKind reflect.Kind
|
||||
outError error
|
||||
}
|
||||
|
||||
// New creates a new json pointer for the given string
|
||||
func New(jsonPointerString string) (Pointer, error) {
|
||||
|
||||
var p Pointer
|
||||
err := p.parse(jsonPointerString)
|
||||
return p, err
|
||||
|
||||
}
|
||||
|
||||
// Pointer the json pointer reprsentation
|
||||
type Pointer struct {
|
||||
referenceTokens []string
|
||||
}
|
||||
|
||||
// "Constructor", parses the given string JSON pointer
|
||||
func (p *Pointer) parse(jsonPointerString string) error {
|
||||
|
||||
var err error
|
||||
|
||||
if jsonPointerString != emptyPointer {
|
||||
if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
|
||||
err = errors.New(invalidStart)
|
||||
} else {
|
||||
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
|
||||
for _, referenceToken := range referenceTokens[1:] {
|
||||
p.referenceTokens = append(p.referenceTokens, referenceToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Get uses the pointer to retrieve a value from a JSON document
|
||||
func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
|
||||
return p.get(document, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
// GetForToken gets a value for a json pointer token 1 level deep
|
||||
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
|
||||
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
|
||||
kind := reflect.Invalid
|
||||
rValue := reflect.Indirect(reflect.ValueOf(node))
|
||||
kind = rValue.Kind()
|
||||
switch kind {
|
||||
|
||||
case reflect.Struct:
|
||||
if rValue.Type().Implements(jsonPointableType) {
|
||||
r, err := node.(JSONPointable).JSONLookup(decodedToken)
|
||||
if err != nil {
|
||||
return nil, kind, err
|
||||
}
|
||||
return r, kind, nil
|
||||
}
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
|
||||
}
|
||||
fld := rValue.FieldByName(nm)
|
||||
return fld.Interface(), kind, nil
|
||||
|
||||
case reflect.Map:
|
||||
kv := reflect.ValueOf(decodedToken)
|
||||
mv := rValue.MapIndex(kv)
|
||||
if mv.IsValid() && !swag.IsZero(mv) {
|
||||
return mv.Interface(), kind, nil
|
||||
}
|
||||
return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
|
||||
|
||||
case reflect.Slice:
|
||||
tokenIndex, err := strconv.Atoi(decodedToken)
|
||||
if err != nil {
|
||||
return nil, kind, err
|
||||
}
|
||||
sLength := rValue.Len()
|
||||
if tokenIndex < 0 || tokenIndex >= sLength {
|
||||
return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
|
||||
}
|
||||
|
||||
elem := rValue.Index(tokenIndex)
|
||||
return elem.Interface(), kind, nil
|
||||
|
||||
default:
|
||||
return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
|
||||
|
||||
if nameProvider == nil {
|
||||
nameProvider = swag.DefaultJSONNameProvider
|
||||
}
|
||||
|
||||
kind := reflect.Invalid
|
||||
|
||||
// Full document when empty
|
||||
if len(p.referenceTokens) == 0 {
|
||||
return node, kind, nil
|
||||
}
|
||||
|
||||
for _, token := range p.referenceTokens {
|
||||
|
||||
decodedToken := Unescape(token)
|
||||
|
||||
r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
|
||||
if err != nil {
|
||||
return nil, knd, err
|
||||
}
|
||||
node, kind = r, knd
|
||||
|
||||
}
|
||||
|
||||
rValue := reflect.ValueOf(node)
|
||||
kind = rValue.Kind()
|
||||
|
||||
return node, kind, nil
|
||||
}
|
||||
|
||||
// DecodedTokens returns the decoded tokens
|
||||
func (p *Pointer) DecodedTokens() []string {
|
||||
result := make([]string, 0, len(p.referenceTokens))
|
||||
for _, t := range p.referenceTokens {
|
||||
result = append(result, Unescape(t))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsEmpty returns true if this is an empty json pointer
|
||||
// this indicates that it points to the root document
|
||||
func (p *Pointer) IsEmpty() bool {
|
||||
return len(p.referenceTokens) == 0
|
||||
}
|
||||
|
||||
// Pointer to string representation function
|
||||
func (p *Pointer) String() string {
|
||||
|
||||
if len(p.referenceTokens) == 0 {
|
||||
return emptyPointer
|
||||
}
|
||||
|
||||
pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)
|
||||
|
||||
return pointerString
|
||||
}
|
||||
|
||||
// Specific JSON pointer encoding here
|
||||
// ~0 => ~
|
||||
// ~1 => /
|
||||
// ... and vice versa
|
||||
|
||||
const (
|
||||
encRefTok0 = `~0`
|
||||
encRefTok1 = `~1`
|
||||
decRefTok0 = `~`
|
||||
decRefTok1 = `/`
|
||||
)
|
||||
|
||||
// Unescape unescapes a json pointer reference token string to the original representation
|
||||
func Unescape(token string) string {
|
||||
step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
|
||||
step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
|
||||
return step2
|
||||
}
|
||||
|
||||
// Escape escapes a pointer reference token string
|
||||
func Escape(token string) string {
|
||||
step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
|
||||
step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
|
||||
return step2
|
||||
}
|
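The jsonpointer package above is consumed by the spec expander later in this commit. As a hedged illustration of the API it exposes (New, Get, DecodedTokens, Escape, Unescape), here is a minimal sketch; the sample document and pointer strings are made up for the example and are not part of this repository.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// Escape/Unescape implement the ~0 (for "~") and ~1 (for "/") token encoding.
	fmt.Println(jsonpointer.Escape("a/b~c"))     // a~1b~0c
	fmt.Println(jsonpointer.Unescape("a~1b~0c")) // a/b~c

	// A pointer into an illustrative in-memory document.
	p, err := jsonpointer.New("/definitions/a~1b")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.DecodedTokens()) // [definitions a/b]
	fmt.Println(p.String())        // /definitions/a~1b

	doc := map[string]interface{}{
		"definitions": map[string]interface{}{
			"a/b": "some value",
		},
	}
	v, _, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // some value
}
```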
1
vendor/github.com/go-openapi/jsonreference/.drone.sec
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Xe40Wx6g5Y-iN0JVMhKyFfubtOId3zAVE564szw_yYGzFNhc_cGZO9F3BtAcJ55CfHG9C_ozn9dpnUDl_zYZoy_6cPCq13Ekb95z8NAC3ekDtbAATsc9HZwRNwI7UfkhstdwxljEouGB01qoLcUn6lFutrou-Ho21COHeDb2caemnPSA-rEAnXkOiBFu0RQ1MIwMygzvHXIHHYNpNwAtXqmiggM10miSjqBM3JmRPxCi7VK6_Rxij5p6LlhmK1BDi8Y6oBh-9BX3--5GAJeWZ6Vof5TnP-Enioia18j8c8KFtfY4q0y6Ednjb-AarLZ12gj695ppkBNJUdTJQmwGwA.fVcz_RiLrUB5fgMS.rjWllDYC6m_NB-ket_LizNEy9mlJ27odBTZQcMKaUqqXZBtWUCmPrOoMXGq-_cc-c7chg7D-WMh9SPQ23pV0P-DY-jsDpbOqHG2STOMEfW9ZREoaOLJXQaWcuBldLjRyWFcq0HGj97LgE6szD1Zlou3bmdHS_Q-U9Up9YQ_8_YnDcESD_cj1w5FZom7HjchKJFeGjQjfDQpoCKCQNMJaavUqy9jHQEeQ_uVocSrETg3GpewDcUF2tuv8uGq7ZZWu7Vl8zmnY1MFTynaGBWzTCSRmCkAXjcsaUheDP_NT5D7k-xUS6LwtqEUiXAXV07SNFraorFj5lnBQZRDlZMYcA3NWR6zHiOxekR9LBYPofst6w1rIqUchj_5m1tDpVTBMPir1eAaFcnJtPgo4ch17OF-kmcmQGLhJI3U7n8wv4sTrmP1dewtRRKrvlJe5r3_6eDiK4xZ8K0rnK1D4g6zuQqU1gA8KaU7pmZkKpFx3Bew4v-6DH32YwQBvAI7Lbb8afou9WsCNB_iswz5XGimP4bifiJRwpWBEz9VGhZFdiw-hZpYWgbxzVb5gtqfTDLIvpbLDmFz1vge16uUQHHVFpo1pSozyr7A60X8qsh9pmmO3RcJ-ZGZBWqiRC-Kl5ejz7WQ.LFoK4Ibi11B2lWQ5WcPSag
|
33
vendor/github.com/go-openapi/jsonreference/.drone.yml
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
clone:
|
||||
path: github.com/go-openapi/jsonreference
|
||||
|
||||
matrix:
|
||||
GO_VERSION:
|
||||
- "1.6"
|
||||
|
||||
build:
|
||||
integration:
|
||||
image: golang:$$GO_VERSION
|
||||
pull: true
|
||||
commands:
|
||||
- go get -u github.com/stretchr/testify/assert
|
||||
- go get -u github.com/PuerkitoBio/purell
|
||||
- go get -u github.com/go-openapi/jsonpointer
|
||||
- go test -race
|
||||
- go test -v -cover -coverprofile=coverage.out -covermode=count ./...
|
||||
|
||||
notify:
|
||||
slack:
|
||||
channel: bots
|
||||
webhook_url: $$SLACK_URL
|
||||
username: drone
|
||||
|
||||
publish:
|
||||
coverage:
|
||||
server: https://coverage.vmware.run
|
||||
token: $$GITHUB_TOKEN
|
||||
# threshold: 70
|
||||
# must_increase: true
|
||||
when:
|
||||
matrix:
|
||||
GO_VERSION: "1.6"
|
1
vendor/github.com/go-openapi/jsonreference/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
secrets.yml
|
13
vendor/github.com/go-openapi/jsonreference/.pullapprove.yml
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
approve_by_comment: true
|
||||
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
|
||||
reject_regex: ^[Rr]ejected
|
||||
reset_on_push: false
|
||||
reviewers:
|
||||
members:
|
||||
- casualjim
|
||||
- chancez
|
||||
- frapposelli
|
||||
- vburenin
|
||||
- pytlesk4
|
||||
name: pullapprove
|
||||
required: 1
|
74
vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202
vendor/github.com/go-openapi/jsonreference/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
15
vendor/github.com/go-openapi/jsonreference/README.md
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
# gojsonreference [](https://ci.vmware.run/go-openapi/jsonreference) [](https://coverage.vmware.run/go-openapi/jsonreference) [](https://slackin.goswagger.io)
|
||||
|
||||
[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonreference)
|
||||
An implementation of JSON Reference - Go language
|
||||
|
||||
## Status
|
||||
Work in progress (90% done)
|
||||
|
||||
## Dependencies
|
||||
https://github.com/xeipuuv/gojsonpointer
|
||||
|
||||
## References
|
||||
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
|
||||
|
||||
http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
|
156
vendor/github.com/go-openapi/jsonreference/reference.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author sigu-399
|
||||
// author-github https://github.com/sigu-399
|
||||
// author-mail sigu.399@gmail.com
|
||||
//
|
||||
// repository-name jsonreference
|
||||
// repository-desc An implementation of JSON Reference - Go language
|
||||
//
|
||||
// description Main and unique file.
|
||||
//
|
||||
// created 26-02-2013
|
||||
|
||||
package jsonreference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/PuerkitoBio/purell"
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
)
|
||||
|
||||
const (
|
||||
fragmentRune = `#`
|
||||
)
|
||||
|
||||
// New creates a new reference for the given string
|
||||
func New(jsonReferenceString string) (Ref, error) {
|
||||
|
||||
var r Ref
|
||||
err := r.parse(jsonReferenceString)
|
||||
return r, err
|
||||
|
||||
}
|
||||
|
||||
// MustCreateRef parses the ref string and panics when it's invalid.
|
||||
// Use the New method for a version that returns an error
|
||||
func MustCreateRef(ref string) Ref {
|
||||
r, err := New(ref)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Ref represents a json reference object
|
||||
type Ref struct {
|
||||
referenceURL *url.URL
|
||||
referencePointer jsonpointer.Pointer
|
||||
|
||||
HasFullURL bool
|
||||
HasURLPathOnly bool
|
||||
HasFragmentOnly bool
|
||||
HasFileScheme bool
|
||||
HasFullFilePath bool
|
||||
}
|
||||
|
||||
// GetURL gets the URL for this reference
|
||||
func (r *Ref) GetURL() *url.URL {
|
||||
return r.referenceURL
|
||||
}
|
||||
|
||||
// GetPointer gets the json pointer for this reference
|
||||
func (r *Ref) GetPointer() *jsonpointer.Pointer {
|
||||
return &r.referencePointer
|
||||
}
|
||||
|
||||
// String returns the best version of the url for this reference
|
||||
func (r *Ref) String() string {
|
||||
|
||||
if r.referenceURL != nil {
|
||||
return r.referenceURL.String()
|
||||
}
|
||||
|
||||
if r.HasFragmentOnly {
|
||||
return fragmentRune + r.referencePointer.String()
|
||||
}
|
||||
|
||||
return r.referencePointer.String()
|
||||
}
|
||||
|
||||
// IsRoot returns true if this reference is a root document
|
||||
func (r *Ref) IsRoot() bool {
|
||||
return r.referenceURL != nil &&
|
||||
!r.IsCanonical() &&
|
||||
!r.HasURLPathOnly &&
|
||||
r.referenceURL.Fragment == ""
|
||||
}
|
||||
|
||||
// IsCanonical returns true when this pointer starts with http(s):// or file://
|
||||
func (r *Ref) IsCanonical() bool {
|
||||
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
|
||||
}
|
||||
|
||||
// "Constructor", parses the given string JSON reference
|
||||
func (r *Ref) parse(jsonReferenceString string) error {
|
||||
|
||||
parsed, err := url.Parse(jsonReferenceString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
|
||||
refURL := r.referenceURL
|
||||
|
||||
if refURL.Scheme != "" && refURL.Host != "" {
|
||||
r.HasFullURL = true
|
||||
} else {
|
||||
if refURL.Path != "" {
|
||||
r.HasURLPathOnly = true
|
||||
} else if refURL.RawQuery == "" && refURL.Fragment != "" {
|
||||
r.HasFragmentOnly = true
|
||||
}
|
||||
}
|
||||
|
||||
r.HasFileScheme = refURL.Scheme == "file"
|
||||
r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")
|
||||
|
||||
// an invalid json-pointer error means the URL has no json-pointer fragment; simply ignore the error
|
||||
r.referencePointer, _ = jsonpointer.New(refURL.Fragment)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Inherits creates a new reference from a parent and a child
|
||||
// If the child cannot inherit from the parent, an error is returned
|
||||
func (r *Ref) Inherits(child Ref) (*Ref, error) {
|
||||
childURL := child.GetURL()
|
||||
parentURL := r.GetURL()
|
||||
if childURL == nil {
|
||||
return nil, errors.New("child url is nil")
|
||||
}
|
||||
if parentURL == nil {
|
||||
return &child, nil
|
||||
}
|
||||
|
||||
ref, err := New(parentURL.ResolveReference(childURL).String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ref, nil
|
||||
}
|
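To make the intent of this vendored package concrete, the following sketch shows New, MustCreateRef, Inherits and GetPointer working together, the same operations the spec expander performs for $ref values. The URLs are placeholders, not anything this repository depends on.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// A parent document URL and a relative child reference (example values only).
	parent, err := jsonreference.New("http://example.com/specs/swagger.json")
	if err != nil {
		panic(err)
	}
	child := jsonreference.MustCreateRef("definitions.json#/definitions/Pet")

	// Resolve the child reference against the parent document.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String())              // http://example.com/specs/definitions.json#/definitions/Pet
	fmt.Println(resolved.GetPointer().String()) // /definitions/Pet
	fmt.Println(resolved.IsCanonical())         // true: it is a full URL
}
```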
26
vendor/github.com/go-openapi/spec/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
# top-most EditorConfig file
|
||||
root = true
|
||||
|
||||
# Unix-style newlines with a newline ending every file
|
||||
[*]
|
||||
end_of_line = lf
|
||||
insert_final_newline = true
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
||||
|
||||
# Set default charset
|
||||
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
|
||||
charset = utf-8
|
||||
|
||||
# Tab indentation (no size specified)
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
# Matches the exact files either package.json or .travis.yml
|
||||
[{package.json,.travis.yml}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
2
vendor/github.com/go-openapi/spec/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
secrets.yml
|
||||
coverage.out
|
16
vendor/github.com/go-openapi/spec/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
install:
|
||||
- go get -u github.com/stretchr/testify
|
||||
- go get -u github.com/go-openapi/swag
|
||||
- go get -u gopkg.in/yaml.v2
|
||||
- go get -u github.com/go-openapi/jsonpointer
|
||||
- go get -u github.com/go-openapi/jsonreference
|
||||
script:
|
||||
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
notifications:
|
||||
slack:
|
||||
secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
|
74
vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202
vendor/github.com/go-openapi/spec/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
5
vendor/github.com/go-openapi/spec/README.md
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
# OAI object model [](https://travis-ci.org/go-openapi/spec) [](https://codecov.io/gh/go-openapi/spec) [](https://slackin.goswagger.io)
|
||||
|
||||
[](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [](http://godoc.org/github.com/go-openapi/spec)
|
||||
|
||||
The object model for OpenAPI specification documents
|
260
vendor/github.com/go-openapi/spec/bindata.go
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
24
vendor/github.com/go-openapi/spec/contact_info.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package spec
|
||||
|
||||
// ContactInfo contact information for the exposed API.
|
||||
//
|
||||
// For more information: http://goo.gl/8us55a#contactObject
|
||||
type ContactInfo struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
}
|
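Since ContactInfo maps directly onto the swagger contact object, a short sketch of how it serializes may be useful; the values below are invented.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Invented contact details, only to show the resulting JSON shape.
	info := spec.ContactInfo{
		Name:  "API Support",
		URL:   "http://www.example.com/support",
		Email: "support@example.com",
	}
	out, err := json.Marshal(info)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"name":"API Support","url":"http://www.example.com/support","email":"support@example.com"}
}
```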
992
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
Normal file
@ -0,0 +1,992 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package spec
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
var (
|
||||
// Debug enables logging when SWAGGER_DEBUG env var is not empty
|
||||
Debug = os.Getenv("SWAGGER_DEBUG") != ""
|
||||
)
|
||||
|
||||
// ExpandOptions provides options for spec expansion.
|
||||
type ExpandOptions struct {
|
||||
RelativeBase string
|
||||
SkipSchemas bool
|
||||
ContinueOnError bool
|
||||
}
|
||||
|
||||
// ResolutionCache is a cache for resolving URLs
|
||||
type ResolutionCache interface {
|
||||
Get(string) (interface{}, bool)
|
||||
Set(string, interface{})
|
||||
}
|
||||
|
||||
type simpleCache struct {
|
||||
lock sync.Mutex
|
||||
store map[string]interface{}
|
||||
}
|
||||
|
||||
var resCache ResolutionCache
|
||||
|
||||
func init() {
|
||||
resCache = initResolutionCache()
|
||||
}
|
||||
|
||||
func initResolutionCache() ResolutionCache {
|
||||
return &simpleCache{store: map[string]interface{}{
|
||||
"http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(),
|
||||
"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
|
||||
}}
|
||||
}
|
||||
|
||||
func (s *simpleCache) Get(uri string) (interface{}, bool) {
|
||||
debugLog("getting %q from resolution cache", uri)
|
||||
s.lock.Lock()
|
||||
v, ok := s.store[uri]
|
||||
debugLog("got %q from resolution cache: %t", uri, ok)
|
||||
|
||||
s.lock.Unlock()
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (s *simpleCache) Set(uri string, data interface{}) {
|
||||
s.lock.Lock()
|
||||
s.store[uri] = data
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
// ResolveRefWithBase resolves a reference against a context root with preservation of base path
|
||||
func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
|
||||
resolver, err := defaultSchemaLoader(root, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
specBasePath := ""
|
||||
if opts != nil && opts.RelativeBase != "" {
|
||||
specBasePath, _ = absPath(opts.RelativeBase)
|
||||
}
|
||||
|
||||
result := new(Schema)
|
||||
if err := resolver.Resolve(ref, result, specBasePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
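A hedged sketch of ResolveRefWithBase in use: resolving a fragment-only $ref against an in-memory document, which is the simplest case and needs no RelativeBase. The document content is made up for the example.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A tiny in-memory swagger fragment with one definition (illustrative only).
	root := map[string]interface{}{
		"definitions": map[string]interface{}{
			"Pet": map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"name": map[string]interface{}{"type": "string"},
				},
			},
		},
	}

	ref, err := spec.NewRef("#/definitions/Pet")
	if err != nil {
		panic(err)
	}

	// Fragment-only refs resolve against the supplied root, so empty options suffice.
	schema, err := spec.ResolveRefWithBase(root, &ref, &spec.ExpandOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(schema.Type) // e.g. [object]
}
```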
||||
|
||||
// ResolveRef resolves a reference against a context root
|
||||
// ref is guaranteed to be in root (no need to go to external files)
|
||||
// ResolveRef is ONLY called from the code generation module
|
||||
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
|
||||
res, _, err := ref.GetPointer().Get(root)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
switch sch := res.(type) {
|
||||
case Schema:
|
||||
return &sch, nil
|
||||
case *Schema:
|
||||
return sch, nil
|
||||
case map[string]interface{}:
|
||||
b, _ := json.Marshal(sch)
|
||||
newSch := new(Schema)
|
||||
json.Unmarshal(b, newSch)
|
||||
return newSch, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown type for the resolved reference")
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveParameter resolves a parameter reference against a context root
|
||||
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
|
||||
return ResolveParameterWithBase(root, ref, nil)
|
||||
}
|
||||
|
||||
// ResolveParameterWithBase resolves a parameter reference against a context root and base path
|
||||
func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
|
||||
resolver, err := defaultSchemaLoader(root, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := new(Parameter)
|
||||
if err := resolver.Resolve(&ref, result, ""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ResolveResponse resolves a response reference against a context root
|
||||
func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
|
||||
return ResolveResponseWithBase(root, ref, nil)
|
||||
}
|
||||
|
||||
// ResolveResponseWithBase resolves a response reference against a context root and base path
|
||||
func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
|
||||
resolver, err := defaultSchemaLoader(root, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := new(Response)
|
||||
if err := resolver.Resolve(&ref, result, ""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ResolveItems resolves a header or parameter items reference against a context root and base path
|
||||
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
|
||||
resolver, err := defaultSchemaLoader(root, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
basePath := ""
|
||||
if opts.RelativeBase != "" {
|
||||
basePath = opts.RelativeBase
|
||||
}
|
||||
result := new(Items)
|
||||
if err := resolver.Resolve(&ref, result, basePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ResolvePathItem resolves a path item reference against a context root and base path
|
||||
func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
|
||||
resolver, err := defaultSchemaLoader(root, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
basePath := ""
|
||||
if opts.RelativeBase != "" {
|
||||
basePath = opts.RelativeBase
|
||||
}
|
||||
result := new(PathItem)
|
||||
if err := resolver.Resolve(&ref, result, basePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type schemaLoader struct {
|
||||
root interface{}
|
||||
options *ExpandOptions
|
||||
cache ResolutionCache
|
||||
loadDoc func(string) (json.RawMessage, error)
|
||||
}
|
||||
|
||||
var idPtr, _ = jsonpointer.New("/id")
|
||||
var refPtr, _ = jsonpointer.New("/$ref")
|
||||
|
||||
// PathLoader function to use when loading remote refs
|
||||
var PathLoader func(string) (json.RawMessage, error)
|
||||
|
||||
func init() {
|
||||
PathLoader = func(path string) (json.RawMessage, error) {
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
}
|
||||
}
|
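PathLoader is an exported package-level hook, so callers can replace the default swag.LoadFromFileOrHTTP loader with their own document source. A minimal sketch, assuming an in-memory map of documents (the keys and content are invented):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Pretend document store; in real use this might be an embedded FS or a cache.
	docs := map[string]string{
		"mem://petstore.json": `{"swagger":"2.0","info":{"title":"Petstore","version":"1.0"}}`,
	}

	// Replace the default loader so referenced documents are served from the map above.
	spec.PathLoader = func(path string) (json.RawMessage, error) {
		if doc, ok := docs[path]; ok {
			return json.RawMessage(doc), nil
		}
		return nil, fmt.Errorf("document %q not found", path)
	}

	raw, err := spec.PathLoader("mem://petstore.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw) > 0) // true
}
```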
||||
|
||||
func defaultSchemaLoader(
|
||||
root interface{},
|
||||
expandOptions *ExpandOptions,
|
||||
cache ResolutionCache) (*schemaLoader, error) {
|
||||
|
||||
if cache == nil {
|
||||
cache = resCache
|
||||
}
|
||||
if expandOptions == nil {
|
||||
expandOptions = &ExpandOptions{}
|
||||
}
|
||||
|
||||
return &schemaLoader{
|
||||
root: root,
|
||||
options: expandOptions,
|
||||
cache: cache,
|
||||
loadDoc: func(path string) (json.RawMessage, error) {
|
||||
debugLog("fetching document at %q", path)
|
||||
return PathLoader(path)
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func idFromNode(node interface{}) (*Ref, error) {
|
||||
if idValue, _, err := idPtr.Get(node); err == nil {
|
||||
if refStr, ok := idValue.(string); ok && refStr != "" {
|
||||
idRef, err := NewRef(refStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &idRef, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
|
||||
if startingRef == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ptr == nil {
|
||||
return startingRef
|
||||
}
|
||||
|
||||
ret := startingRef
|
||||
var idRef *Ref
|
||||
node := startingNode
|
||||
|
||||
for _, tok := range ptr.DecodedTokens() {
|
||||
node, _, _ = jsonpointer.GetForToken(node, tok)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
|
||||
idRef, _ = idFromNode(node)
|
||||
if idRef != nil {
|
||||
nw, err := ret.Inherits(*idRef)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
ret = nw
|
||||
}
|
||||
|
||||
refRef, _, _ := refPtr.Get(node)
|
||||
if refRef != nil {
|
||||
var rf Ref
|
||||
switch value := refRef.(type) {
|
||||
case string:
|
||||
rf, _ = NewRef(value)
|
||||
}
|
||||
nw, err := ret.Inherits(rf)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
nwURL := nw.GetURL()
|
||||
if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
|
||||
nwpt := filepath.ToSlash(nwURL.Path)
|
||||
if filepath.IsAbs(nwpt) {
|
||||
_, err := os.Stat(nwpt)
|
||||
if err != nil {
|
||||
nwURL.Path = filepath.Join(".", nwpt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = nw
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func debugLog(msg string, args ...interface{}) {
|
||||
if Debug {
|
||||
log.Printf(msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// normalize absolute path for cache.
|
||||
// on Windows, drive letters should be converted to lower case, since net/url.URL treats the drive letter as the URL scheme
|
||||
func normalizeAbsPath(path string) string {
|
||||
u, err := url.Parse(path)
|
||||
if err != nil {
|
||||
debugLog("normalize absolute path failed: %s", err)
|
||||
return path
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// base or refPath could be a file path or a URL
|
||||
// given a base absolute path and a ref path, return the absolute path of refPath
|
||||
// 1) if refPath is absolute, return it
|
||||
// 2) if refPath is relative, join it with basePath, keeping the scheme, host, and port if they exist
|
||||
// base could be a directory or a full file path
|
||||
func normalizePaths(refPath, base string) string {
|
||||
refURL, _ := url.Parse(refPath)
|
||||
if path.IsAbs(refURL.Path) {
|
||||
// refPath is actually absolute
|
||||
if refURL.Host != "" {
|
||||
return refPath
|
||||
}
|
||||
return filepath.FromSlash(refPath)
|
||||
}
|
||||
|
||||
// relative refPath
|
||||
baseURL, _ := url.Parse(base)
|
||||
if !strings.HasPrefix(refPath, "#") {
|
||||
// combining paths
|
||||
if baseURL.Host != "" {
|
||||
baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
|
||||
} else { // base is a file
|
||||
newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment)
|
||||
return newBase
|
||||
}
|
||||
|
||||
}
|
||||
// copying fragment from ref to base
|
||||
baseURL.Fragment = refURL.Fragment
|
||||
return baseURL.String()
|
||||
}
|
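Because normalizePaths is unexported, the clearest way to illustrate its joining rules is a table test that would sit next to this file in package spec. This is a sketch only: the inputs are invented and the expected values assume Unix-style file paths.

```go
package spec

import "testing"

func TestNormalizePathsSketch(t *testing.T) {
	cases := []struct {
		ref, base, want string
	}{
		// 1) an absolute URL ref is returned unchanged
		{
			ref:  "http://other.example.com/b.json#/definitions/X",
			base: "http://www.example.com/base/spec.json",
			want: "http://other.example.com/b.json#/definitions/X",
		},
		// 2) a relative ref against a URL base is joined on the base document's directory
		{
			ref:  "b.json#/definitions/X",
			base: "http://www.example.com/base/spec.json",
			want: "http://www.example.com/base/b.json#/definitions/X",
		},
		// a fragment-only ref keeps the base path and only swaps the fragment
		{
			ref:  "#/definitions/X",
			base: "/tmp/specs/spec.json",
			want: "/tmp/specs/spec.json#/definitions/X",
		},
		// a relative file ref against a file base is joined on the base file's directory
		{
			ref:  "other.json#/definitions/X",
			base: "/tmp/specs/spec.json",
			want: "/tmp/specs/other.json#/definitions/X",
		},
	}
	for _, c := range cases {
		if got := normalizePaths(c.ref, c.base); got != c.want {
			t.Errorf("normalizePaths(%q, %q) = %q, want %q", c.ref, c.base, got, c.want)
		}
	}
}
```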
||||
|
||||
// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL
|
||||
func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
|
||||
// This is important for when the reference is pointing to the root schema
|
||||
if ref.String() == "" {
|
||||
r, _ := NewRef(relativeBase)
|
||||
return &r
|
||||
}
|
||||
|
||||
refURL := ref.GetURL()
|
||||
debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String())
|
||||
|
||||
s := normalizePaths(ref.String(), relativeBase)
|
||||
r, _ := NewRef(s)
|
||||
return &r
|
||||
}
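
// Illustrative sketch (hypothetical Ref values and paths): a fragment-only ref is anchored
// to the absolute base document, while an empty ref resolves to the base document itself:
//
//	normalizeFileRef(&fragmentRef, "/specs/swagger.json") // "#/definitions/Item" -> "/specs/swagger.json#/definitions/Item"
//	normalizeFileRef(&emptyRef, "/specs/swagger.json")    // ""                   -> "/specs/swagger.json"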

func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error {
	tgt := reflect.ValueOf(target)
	if tgt.Kind() != reflect.Ptr {
		return fmt.Errorf("resolve ref: target needs to be a pointer")
	}

	refURL := ref.GetURL()
	if refURL == nil {
		return nil
	}

	var res interface{}
	var data interface{}
	var err error
	// Resolve against the root if it isn't nil, and if ref points at the root or has a
	// fragment only, which means it points somewhere inside the root.
	root := r.root
	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
		if baseRef, err := NewRef(basePath); err == nil {
			root, _, _, _ = r.load(baseRef.GetURL())
		}
	}
	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
		data = root
	} else {
		baseRef := normalizeFileRef(ref, basePath)
		debugLog("current ref is: %s", ref.String())
		debugLog("current ref normalized file: %s", baseRef.String())
		data, _, _, err = r.load(baseRef.GetURL())
		if err != nil {
			return err
		}
	}

	res = data
	if ref.String() != "" {
		res, _, err = ref.GetPointer().Get(data)
		if err != nil {
			return err
		}
	}
	if err := swag.DynamicJSONToStruct(res, target); err != nil {
		return err
	}

	return nil
}

func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
	debugLog("loading schema from url: %s", refURL)
	toFetch := *refURL
	toFetch.Fragment = ""

	data, fromCache := r.cache.Get(toFetch.String())
	if !fromCache {
		b, err := r.loadDoc(toFetch.String())
		if err != nil {
			return nil, url.URL{}, false, err
		}

		if err := json.Unmarshal(b, &data); err != nil {
			return nil, url.URL{}, false, err
		}
		r.cache.Set(toFetch.String(), data)
	}

	return data, toFetch, fromCache, nil
}

// Resolve resolves a reference against basePath and stores the result in target.
// Resolve is not in charge of following references: it only resolves ref by following its URL.
// If the schema the ref points to holds nested refs, Resolve does not resolve them.
// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct.
func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
	return r.resolveRef(ref, target, basePath)
}

// absPath returns the absolute path of a file
func absPath(fname string) (string, error) {
	if strings.HasPrefix(fname, "http") {
		return fname, nil
	}
	if filepath.IsAbs(fname) {
		return fname, nil
	}
	wd, err := os.Getwd()
	return filepath.Join(wd, fname), err
}
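
// Illustrative sketch, assuming a working directory of "/specs": URLs and already absolute
// paths pass through unchanged, relative paths are joined with the working directory:
//
//	absPath("http://petstore.example.com/spec.json") // -> returned unchanged
//	absPath("/specs/swagger.json")                    // -> returned unchanged
//	absPath("common.json")                            // -> "/specs/common.json"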

// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
	resolver, err := defaultSchemaLoader(spec, options, nil)
	// Just in case this ever returns an error.
	if shouldStopOnError(err, resolver.options) {
		return err
	}

	// getting the base path of the spec to adjust all subsequent reference resolutions
	specBasePath := ""
	if options != nil && options.RelativeBase != "" {
		specBasePath, _ = absPath(options.RelativeBase)
	}

	if options == nil || !options.SkipSchemas {
		for key, definition := range spec.Definitions {
			var def *Schema
			var err error
			if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
				return err
			}
			if def != nil {
				spec.Definitions[key] = *def
			}
		}
	}

	for key, parameter := range spec.Parameters {
		if err := expandParameter(&parameter, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
			return err
		}
		spec.Parameters[key] = parameter
	}

	for key, response := range spec.Responses {
		if err := expandResponse(&response, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
			return err
		}
		spec.Responses[key] = response
	}

	if spec.Paths != nil {
		for key, path := range spec.Paths.Paths {
			if err := expandPathItem(&path, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
				return err
			}
			spec.Paths.Paths[key] = path
		}
	}

	return nil
}
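
// Illustrative usage sketch; the file path and the swaggerDoc variable are hypothetical,
// loading and unmarshalling the document is left to the caller:
//
//	opts := &ExpandOptions{RelativeBase: "./fixtures/swagger.json", SkipSchemas: false}
//	if err := ExpandSpec(swaggerDoc, opts); err != nil {
//		// handle the expansion error (only returned when ContinueOnError is false)
//	}
//
// RelativeBase should be the path of the document itself, so relative $ref values are
// resolved against its directory.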

func shouldStopOnError(err error, opts *ExpandOptions) bool {
	if err != nil && !opts.ContinueOnError {
		return true
	}

	if err != nil {
		log.Println(err)
	}

	return false
}

// ExpandSchema expands the refs in the schema object with reference to the root object
// go-openapi/validate uses this function
// notice that it is impossible to reference a json schema in a file other than the root
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
	// Only cache the root document if it isn't nil.
	var base string
	if root != nil {
		base, _ = absPath("root")
		if cache == nil {
			cache = resCache
		}
		cache.Set(normalizeAbsPath(base), root)
		base = "root"
	}

	opts := &ExpandOptions{
		RelativeBase:    base,
		SkipSchemas:     false,
		ContinueOnError: false,
	}
	return ExpandSchemaWithBasePath(schema, cache, opts)
}
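
// Illustrative usage sketch; rootDoc and schema are hypothetical values held by the caller,
// and passing a nil cache falls back to the package-level resCache when root is non-nil:
//
//	if err := ExpandSchema(schema, rootDoc, nil); err != nil {
//		// handle the expansion error
//	}
//
// The root document is cached under the pseudo path "root" (made absolute against the
// working directory), which is why refs can only point inside the root document.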

// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error {
	if schema == nil {
		return nil
	}

	var basePath string
	if opts.RelativeBase != "" {
		basePath, _ = absPath(opts.RelativeBase)
	}

	resolver, err := defaultSchemaLoader(nil, opts, cache)
	if err != nil {
		return err
	}

	refs := []string{""}
	var s *Schema
	if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil {
		return err
	}
	*schema = *s
	return nil
}

func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
	if target.Items != nil {
		if target.Items.Schema != nil {
			t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath)
			if err != nil {
				return nil, err
			}
			*target.Items.Schema = *t
		}
		for i := range target.Items.Schemas {
			t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath)
			if err != nil {
				return nil, err
			}
			target.Items.Schemas[i] = *t
		}
	}
	return &target, nil
}

// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID
func basePathFromSchemaID(oldBasePath, id string) string {
	u, err := url.Parse(oldBasePath)
	if err != nil {
		panic(err)
	}
	uid, err := url.Parse(id)
	if err != nil {
		panic(err)
	}

	if path.IsAbs(uid.Path) {
		return id
	}
	u.Path = path.Join(path.Dir(u.Path), uid.Path)
	return u.String()
}
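
// Illustrative sketch (hypothetical paths): a relative schema ID is resolved against the
// directory of the current base path, while an absolute ID replaces it:
//
//	basePathFromSchemaID("/specs/swagger.json", "sub/schema.json")    // -> "/specs/sub/schema.json"
//	basePathFromSchemaID("/specs/swagger.json", "/other/schema.json") // -> "/other/schema.json"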

func isCircular(ref *Ref, basePath string, parentRefs ...string) bool {
	return basePath != "" && swag.ContainsStringsCI(parentRefs, ref.String())
}

func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
	if target.Ref.String() == "" && target.Ref.IsRoot() {
		// normalizing is important
		newRef := normalizeFileRef(&target.Ref, basePath)
		target.Ref = *newRef
		return &target, nil
	}

	/* change the base path of resolution when an ID is encountered,
	otherwise the basePath should inherit the parent's */
	// important: ID can be a relative path
	if target.ID != "" {
		// handling the case when id is a folder
		// remember that basePath has to be a file
		refPath := target.ID
		if strings.HasSuffix(target.ID, "/") {
			// path.Clean here would not work correctly if basepath is http
			refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json")
		}
		basePath = normalizePaths(refPath, basePath)
	}

	/* The remainder of this function resolves a $ref, if present, and then recursively
	expands every nested schema of the target */

	var t *Schema
	/* if Ref is found, everything else doesn't matter */
	/* Ref also changes the resolution scope for the child expandSchema calls */
	if target.Ref.String() != "" {
		/* Here the resolution scope is changed because a $ref was encountered */
		normalizedRef := normalizeFileRef(&target.Ref, basePath)
		normalizedBasePath := normalizedRef.RemoteURI()

		/* this means there is a circle in the recursion tree */
		/* return the Ref */
		if isCircular(normalizedRef, basePath, parentRefs...) {
			target.Ref = *normalizedRef
			return &target, nil
		}

		debugLog("\nbasePath: %s", basePath)
		if Debug {
			b, _ := json.Marshal(target)
			debugLog("calling Resolve with target: %s", string(b))
		}
		if err := resolver.Resolve(&target.Ref, &t, basePath); shouldStopOnError(err, resolver.options) {
			return nil, err
		}

		if t != nil {
			parentRefs = append(parentRefs, normalizedRef.String())
			return expandSchema(*t, parentRefs, resolver, normalizedBasePath)
		}
	}

	t, err := expandItems(target, parentRefs, resolver, basePath)
	if shouldStopOnError(err, resolver.options) {
		return &target, err
	}
	if t != nil {
		target = *t
	}

	for i := range target.AllOf {
		t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		target.AllOf[i] = *t
	}
	for i := range target.AnyOf {
		t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		target.AnyOf[i] = *t
	}
	for i := range target.OneOf {
		t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			target.OneOf[i] = *t
		}
	}
	if target.Not != nil {
		t, err := expandSchema(*target.Not, parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			*target.Not = *t
		}
	}
	for k := range target.Properties {
		t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			target.Properties[k] = *t
		}
	}
	if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
		t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			*target.AdditionalProperties.Schema = *t
		}
	}
	for k := range target.PatternProperties {
		t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			target.PatternProperties[k] = *t
		}
	}
	for k := range target.Dependencies {
		if target.Dependencies[k].Schema != nil {
			t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath)
			if shouldStopOnError(err, resolver.options) {
				return &target, err
			}
			if t != nil {
				*target.Dependencies[k].Schema = *t
			}
		}
	}
	if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
		t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			*target.AdditionalItems.Schema = *t
		}
	}
	for k := range target.Definitions {
		t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return &target, err
		}
		if t != nil {
			target.Definitions[k] = *t
		}
	}
	return &target, nil
}
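
// Illustrative sketch of the circular-ref guard above, using a hypothetical self-referential
// definition:
//
//	"Node": {
//	  "type": "object",
//	  "properties": {
//	    "children": {"type": "array", "items": {"$ref": "#/definitions/Node"}}
//	  }
//	}
//
// Each normalized $ref that expandSchema follows is appended to parentRefs; when the same
// normalized ref shows up again, isCircular reports true and the ref is left in place
// instead of being expanded without bound.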

func derefPathItem(pathItem *PathItem, parentRefs []string, resolver *schemaLoader, basePath string) error {
	curRef := pathItem.Ref.String()
	if curRef != "" {
		normalizedRef := normalizeFileRef(&pathItem.Ref, basePath)
		normalizedBasePath := normalizedRef.RemoteURI()

		if isCircular(normalizedRef, basePath, parentRefs...) {
			return nil
		}

		if err := resolver.Resolve(&pathItem.Ref, pathItem, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}

		if pathItem.Ref.String() != "" && pathItem.Ref.String() != curRef && basePath != normalizedBasePath {
			parentRefs = append(parentRefs, normalizedRef.String())
			return derefPathItem(pathItem, parentRefs, resolver, normalizedBasePath)
		}
	}

	return nil
}

func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
	if pathItem == nil {
		return nil
	}

	parentRefs := []string{}
	if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	pathItem.Ref = Ref{}

	parentRefs = parentRefs[0:]

	for idx := range pathItem.Parameters {
		if err := expandParameter(&(pathItem.Parameters[idx]), resolver, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}
	}
	if err := expandOperation(pathItem.Get, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Head, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Options, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Put, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Post, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Patch, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	if err := expandOperation(pathItem.Delete, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	return nil
}

func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
	if op == nil {
		return nil
	}

	for i, param := range op.Parameters {
		if err := expandParameter(&param, resolver, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}
		op.Parameters[i] = param
	}

	if op.Responses != nil {
		responses := op.Responses
		if err := expandResponse(responses.Default, resolver, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}
		for code, response := range responses.StatusCodeResponses {
			if err := expandResponse(&response, resolver, basePath); shouldStopOnError(err, resolver.options) {
				return err
			}
			responses.StatusCodeResponses[code] = response
		}
	}
	return nil
}

// ExpandResponse expands a response based on a basepath
// This is the exported version of expandResponse
// all refs inside response will be resolved relative to basePath
func ExpandResponse(response *Response, basePath string) error {
	opts := &ExpandOptions{
		RelativeBase: basePath,
	}
	resolver, err := defaultSchemaLoader(nil, opts, nil)
	if err != nil {
		return err
	}

	return expandResponse(response, resolver, basePath)
}
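
// Illustrative usage sketch; the response value and the base path are hypothetical:
//
//	var resp Response // e.g. unmarshalled from a shared responses document
//	if err := ExpandResponse(&resp, "./fixtures/responses.json"); err != nil {
//		// handle the expansion error
//	}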

func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error {
	curRef := response.Ref.String()
	if curRef != "" {
		/* Here the resolution scope is changed because a $ref was encountered */
		normalizedRef := normalizeFileRef(&response.Ref, basePath)
		normalizedBasePath := normalizedRef.RemoteURI()

		if isCircular(normalizedRef, basePath, parentRefs...) {
			return nil
		}

		if err := resolver.Resolve(&response.Ref, response, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}

		if response.Ref.String() != "" && response.Ref.String() != curRef && basePath != normalizedBasePath {
			parentRefs = append(parentRefs, normalizedRef.String())
			return derefResponse(response, parentRefs, resolver, normalizedBasePath)
		}
	}

	return nil
}

func expandResponse(response *Response, resolver *schemaLoader, basePath string) error {
	if response == nil {
		return nil
	}

	parentRefs := []string{}
	if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	response.Ref = Ref{}

	parentRefs = parentRefs[0:]
	if !resolver.options.SkipSchemas && response.Schema != nil {
		parentRefs = append(parentRefs, response.Schema.Ref.String())
		s, err := expandSchema(*response.Schema, parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return err
		}
		*response.Schema = *s
	}

	return nil
}

// ExpandParameter expands a parameter based on a basepath
// This is the exported version of expandParameter
// all refs inside parameter will be resolved relative to basePath
func ExpandParameter(parameter *Parameter, basePath string) error {
	opts := &ExpandOptions{
		RelativeBase: basePath,
	}
	resolver, err := defaultSchemaLoader(nil, opts, nil)
	if err != nil {
		return err
	}

	return expandParameter(parameter, resolver, basePath)
}
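
// Illustrative usage sketch; the parameter value and the base path are hypothetical:
//
//	var param Parameter // e.g. unmarshalled from a shared parameters document
//	if err := ExpandParameter(&param, "./fixtures/parameters.json"); err != nil {
//		// handle the expansion error
//	}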

func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error {
	curRef := parameter.Ref.String()
	if curRef != "" {
		normalizedRef := normalizeFileRef(&parameter.Ref, basePath)
		normalizedBasePath := normalizedRef.RemoteURI()

		if isCircular(normalizedRef, basePath, parentRefs...) {
			return nil
		}

		if err := resolver.Resolve(&parameter.Ref, parameter, basePath); shouldStopOnError(err, resolver.options) {
			return err
		}

		if parameter.Ref.String() != "" && parameter.Ref.String() != curRef && basePath != normalizedBasePath {
			parentRefs = append(parentRefs, normalizedRef.String())
			return derefParameter(parameter, parentRefs, resolver, normalizedBasePath)
		}
	}

	return nil
}

func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath string) error {
	if parameter == nil {
		return nil
	}

	parentRefs := []string{}
	if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
		return err
	}
	parameter.Ref = Ref{}

	parentRefs = parentRefs[0:]
	if !resolver.options.SkipSchemas && parameter.Schema != nil {
		parentRefs = append(parentRefs, parameter.Schema.Ref.String())
		s, err := expandSchema(*parameter.Schema, parentRefs, resolver, basePath)
		if shouldStopOnError(err, resolver.options) {
			return err
		}
		*parameter.Schema = *s
	}
	return nil
}