Mirror of https://github.com/kubernetes/client-go.git, synced 2025-10-21 13:58:40 +00:00.

Commit: published by bot (https://github.com/kubernetes/contrib/tree/master/mungegithub); copied from https://github.com/kubernetes/kubernetes.git, branch master, last commit baaaf26609565b4299008018486ec75fb30903eb.
@@ -1,215 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"errors"
	"fmt"
	"math"
	"math/big"
	"net"
	"time"
)

const (
	rsaKeySize   = 2048
	duration365d = time.Hour * 24 * 365
)

// Config contains the basic fields required for creating a certificate
type Config struct {
	CommonName   string
	Organization []string
	AltNames     AltNames
	Usages       []x509.ExtKeyUsage
}

// AltNames contains the domain names and IP addresses that will be added
// to the API Server's x509 certificate SubAltNames field. The values will
// be passed directly to the x509.Certificate object.
type AltNames struct {
	DNSNames []string
	IPs      []net.IP
}

// NewPrivateKey creates an RSA private key
func NewPrivateKey() (*rsa.PrivateKey, error) {
	return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
}

// NewSelfSignedCACert creates a CA certificate
func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) {
	now := time.Now()
	tmpl := x509.Certificate{
		SerialNumber: new(big.Int).SetInt64(0),
		Subject: pkix.Name{
			CommonName:   cfg.CommonName,
			Organization: cfg.Organization,
		},
		NotBefore: now.UTC(),
		NotAfter:  now.Add(duration365d * 10).UTC(),
		KeyUsage:  x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA: true,
	}

	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(certDERBytes)
}

// NewSignedCert creates a signed certificate using the given CA certificate and key
func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) {
	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	if len(cfg.CommonName) == 0 {
		return nil, errors.New("must specify a CommonName")
	}
	if len(cfg.Usages) == 0 {
		return nil, errors.New("must specify at least one ExtKeyUsage")
	}

	certTmpl := x509.Certificate{
		Subject: pkix.Name{
			CommonName:   cfg.CommonName,
			Organization: cfg.Organization,
		},
		DNSNames:     cfg.AltNames.DNSNames,
		IPAddresses:  cfg.AltNames.IPs,
		SerialNumber: serial,
		NotBefore:    caCert.NotBefore,
		NotAfter:     time.Now().Add(duration365d).UTC(),
		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  cfg.Usages,
	}
	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(certDERBytes)
}

// MakeEllipticPrivateKeyPEM creates an ECDSA private key
func MakeEllipticPrivateKeyPEM() ([]byte, error) {
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
	if err != nil {
		return nil, err
	}

	derBytes, err := x509.MarshalECPrivateKey(privateKey)
	if err != nil {
		return nil, err
	}

	privateKeyPemBlock := &pem.Block{
		Type:  "EC PRIVATE KEY",
		Bytes: derBytes,
	}
	return pem.EncodeToMemory(privateKeyPemBlock), nil
}

// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
// Host may be an IP or a DNS name.
// You may also specify additional subject alt names (either IP or DNS names) for the certificate.
func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
	priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}

	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
		},
		NotBefore: time.Now(),
		NotAfter:  time.Now().Add(time.Hour * 24 * 365),

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		IsCA: true,
	}

	if ip := net.ParseIP(host); ip != nil {
		template.IPAddresses = append(template.IPAddresses, ip)
	} else {
		template.DNSNames = append(template.DNSNames, host)
	}

	template.IPAddresses = append(template.IPAddresses, alternateIPs...)
	template.DNSNames = append(template.DNSNames, alternateDNS...)

	derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		return nil, nil, err
	}

	// Generate cert
	certBuffer := bytes.Buffer{}
	if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
		return nil, nil, err
	}

	// Generate key
	keyBuffer := bytes.Buffer{}
	if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
		return nil, nil, err
	}

	return certBuffer.Bytes(), keyBuffer.Bytes(), nil
}

// FormatBytesCert receives a byte-slice certificate and formats it in a human-readable format
func FormatBytesCert(cert []byte) (string, error) {
	block, _ := pem.Decode(cert)
	c, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", fmt.Errorf("failed to parse certificate [%v]", err)
	}
	return FormatCert(c), nil
}

// FormatCert receives a certificate and formats it in a human-readable format
func FormatCert(c *x509.Certificate) string {
	var ips []string
	for _, ip := range c.IPAddresses {
		ips = append(ips, ip.String())
	}
	altNames := append(ips, c.DNSNames...)
	res := fmt.Sprintf(
		"Issuer: CN=%s | Subject: CN=%s | CA: %t\n",
		c.Issuer.CommonName, c.Subject.CommonName, c.IsCA,
	)
	res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter)
	if len(altNames) > 0 {
		res += fmt.Sprintf("\nAlternate Names: %v", altNames)
	}
	return res
}
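The following short usage sketch is not part of the original commit; it only illustrates how the helpers above could be combined to produce and persist a self-signed serving certificate. The import path is taken from the package layout shown elsewhere in this diff (k8s.io/client-go/pkg/util/cert), and the output file paths are illustrative assumptions.

package main

import (
	"log"
	"net"

	certutil "k8s.io/client-go/pkg/util/cert"
)

func main() {
	// Self-signed cert/key pair for "localhost" with an extra IP SAN.
	certPEM, keyPEM, err := certutil.GenerateSelfSignedCertKey("localhost", []net.IP{net.ParseIP("127.0.0.1")}, nil)
	if err != nil {
		log.Fatal(err)
	}
	// WriteCert/WriteKey (defined later in this package) create parent directories
	// and apply 0644/0600 file modes respectively.
	if err := certutil.WriteCert("/tmp/serving.crt", certPEM); err != nil {
		log.Fatal(err)
	}
	if err := certutil.WriteKey("/tmp/serving.key", keyPEM); err != nil {
		log.Fatal(err)
	}
}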
@@ -1,63 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"net"
)

// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
	// Customize the signature for RSA keys, depending on the key size
	var sigType x509.SignatureAlgorithm
	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
		keySize := privateKey.N.BitLen()
		switch {
		case keySize >= 4096:
			sigType = x509.SHA512WithRSA
		case keySize >= 3072:
			sigType = x509.SHA384WithRSA
		default:
			sigType = x509.SHA256WithRSA
		}
	}

	template := &x509.CertificateRequest{
		Subject:            *subject,
		SignatureAlgorithm: sigType,
		DNSNames:           dnsSANs,
		IPAddresses:        ipSANs,
	}

	csr, err = x509.CreateCertificateRequest(cryptorand.Reader, template, privateKey)
	if err != nil {
		return nil, err
	}

	csrPemBlock := &pem.Block{
		Type:  "CERTIFICATE REQUEST",
		Bytes: csr,
	}

	return pem.EncodeToMemory(csrPemBlock), nil
}
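A minimal sketch of calling MakeCSR, not part of the original commit: it generates an RSA key (2048 bits selects SHA256WithRSA per the switch above) and requests a CSR with one DNS SAN and one IP SAN. The subject and SAN values are arbitrary examples; the import path is assumed from this repository's layout.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509/pkix"
	"log"
	"net"

	certutil "k8s.io/client-go/pkg/util/cert"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	csrPEM, err := certutil.MakeCSR(key,
		&pkix.Name{CommonName: "kube-worker"},
		[]string{"localhost"},
		[]net.IP{net.ParseIP("127.0.0.1")})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", csrPEM) // PEM block of type "CERTIFICATE REQUEST"
}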
@@ -1,46 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	"crypto/x509/pkix"
	"io/ioutil"
	"net"
	"testing"
)

func TestMakeCSR(t *testing.T) {
	keyFile := "testdata/dontUseThisKey.pem"
	subject := &pkix.Name{
		CommonName: "kube-worker",
	}
	dnsSANs := []string{"localhost"}
	ipSANs := []net.IP{net.ParseIP("127.0.0.1")}

	keyData, err := ioutil.ReadFile(keyFile)
	if err != nil {
		t.Fatal(err)
	}
	key, err := ParsePrivateKeyPEM(keyData)
	if err != nil {
		t.Fatal(err)
	}
	_, err = MakeCSR(key, subject, dnsSANs, ipSANs)
	if err != nil {
		t.Error(err)
	}
}
@@ -1,129 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// CanReadCertAndKey returns true if the certificate and key files already exist,
// otherwise returns false. If only one of the two can be read, it returns an error.
func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
	certReadable := canReadFile(certPath)
	keyReadable := canReadFile(keyPath)

	if certReadable == false && keyReadable == false {
		return false, nil
	}

	if certReadable == false {
		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
	}

	if keyReadable == false {
		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
	}

	return true, nil
}

// canReadFile returns true if the file represented by path exists and is
// readable, otherwise it returns false.
func canReadFile(path string) bool {
	f, err := os.Open(path)
	if err != nil {
		return false
	}

	defer f.Close()

	return true
}

// WriteCert writes the pem-encoded certificate data to certPath.
// The certificate file will be created with file mode 0644.
// If the certificate file already exists, it will be overwritten.
// The parent directory of the certPath will be created as needed with file mode 0755.
func WriteCert(certPath string, data []byte) error {
	if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil {
		return err
	}
	return nil
}

// WriteKey writes the pem-encoded key data to keyPath.
// The key file will be created with file mode 0600.
// If the key file already exists, it will be overwritten.
// The parent directory of the keyPath will be created as needed with file mode 0755.
func WriteKey(keyPath string, data []byte) error {
	if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil {
		return err
	}
	return nil
}

// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPool(filename string) (*x509.CertPool, error) {
	certs, err := CertsFromFile(filename)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	for _, cert := range certs {
		pool.AddCert(cert)
	}
	return pool, nil
}

// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func CertsFromFile(file string) ([]*x509.Certificate, error) {
	pemBlock, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	certs, err := ParseCertsPEM(pemBlock)
	if err != nil {
		return nil, fmt.Errorf("error reading %s: %s", file, err)
	}
	return certs, nil
}

// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
// Returns an error if the file could not be read or if the private key could not be parsed.
func PrivateKeyFromFile(file string) (interface{}, error) {
	pemBlock, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	key, err := ParsePrivateKeyPEM(pemBlock)
	if err != nil {
		return nil, fmt.Errorf("error reading %s: %v", file, err)
	}
	return key, nil
}
@@ -1,107 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// EncodePublicKeyPEM returns PEM-encoded public key data
func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
	der, err := x509.MarshalPKIXPublicKey(key)
	if err != nil {
		return []byte{}, err
	}
	block := pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: der,
	}
	return pem.EncodeToMemory(&block), nil
}

// EncodePrivateKeyPEM returns PEM-encoded private key data
func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
	block := pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	}
	return pem.EncodeToMemory(&block)
}

// EncodeCertPEM returns PEM-encoded certificate data
func EncodeCertPEM(cert *x509.Certificate) []byte {
	block := pem.Block{
		Type:  "CERTIFICATE",
		Bytes: cert.Raw,
	}
	return pem.EncodeToMemory(&block)
}

// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
// Recognizes PEM blocks for "EC PRIVATE KEY" and "RSA PRIVATE KEY"
func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
	for {
		var privateKeyPemBlock *pem.Block
		privateKeyPemBlock, keyData = pem.Decode(keyData)
		if privateKeyPemBlock == nil {
			// we read all the PEM blocks and didn't recognize one
			return nil, fmt.Errorf("no private key PEM block found")
		}

		switch privateKeyPemBlock.Type {
		case "EC PRIVATE KEY":
			return x509.ParseECPrivateKey(privateKeyPemBlock.Bytes)
		case "RSA PRIVATE KEY":
			return x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes)
		}
	}
}

// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
	ok := false
	certs := []*x509.Certificate{}
	for len(pemCerts) > 0 {
		var block *pem.Block
		block, pemCerts = pem.Decode(pemCerts)
		if block == nil {
			break
		}
		// Only use PEM "CERTIFICATE" blocks without extra headers
		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
			continue
		}

		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return certs, err
		}

		certs = append(certs, cert)
		ok = true
	}

	if !ok {
		return certs, errors.New("could not read any certificates")
	}
	return certs, nil
}
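A brief illustrative sketch (not in the original commit) of how ParseCertsPEM is typically consumed via NewPool from this package: every CERTIFICATE block in a PEM bundle is loaded into an x509.CertPool. The file path is a made-up example and the import path is assumed from this repository's layout.

package main

import (
	"log"

	certutil "k8s.io/client-go/pkg/util/cert"
)

func main() {
	// NewPool reads the file, calls ParseCertsPEM, and adds every cert to a pool.
	pool, err := certutil.NewPool("/etc/kubernetes/ca.crt") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d CA subjects", len(pool.Subjects()))
}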
pkg/util/cert/testdata/dontUseThisKey.pem (vendored, 6 lines)

@@ -1,6 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAPEbSXwyDfWf0+61Oofd7aHkmdX69mrzD2Xb1CHF5syfsoRIhnG0dJ
ozBulPZCDDWgBwYFK4EEACKhZANiAATjlMJAtKhEPqU/i7MsrgKcK/RmXHC6He7W
0p69+9qFXg2raJ9zvvbKxkiu2ELOYRDAz0utcFTBOIgoUJEzBVmsjZQ7dvFa1BKP
Ym7MFAKG3O2espBqXn+audgdHGh5B0I=
-----END EC PRIVATE KEY-----
@@ -1,116 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package triple generates key-certificate pairs for the
// triple (CA, Server, Client).
package triple

import (
	"crypto/rsa"
	"crypto/x509"
	"fmt"
	"net"

	certutil "k8s.io/client-go/pkg/util/cert"
)

type KeyPair struct {
	Key  *rsa.PrivateKey
	Cert *x509.Certificate
}

func NewCA(name string) (*KeyPair, error) {
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err)
	}

	config := certutil.Config{
		CommonName: name,
	}

	cert, err := certutil.NewSelfSignedCACert(config, key)
	if err != nil {
		return nil, fmt.Errorf("unable to create a self-signed certificate for a new CA: %v", err)
	}

	return &KeyPair{
		Key:  key,
		Cert: cert,
	}, nil
}

func NewServerKeyPair(ca *KeyPair, commonName, svcName, svcNamespace, dnsDomain string, ips, hostnames []string) (*KeyPair, error) {
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return nil, fmt.Errorf("unable to create a server private key: %v", err)
	}

	namespacedName := fmt.Sprintf("%s.%s", svcName, svcNamespace)
	internalAPIServerFQDN := []string{
		svcName,
		namespacedName,
		fmt.Sprintf("%s.svc", namespacedName),
		fmt.Sprintf("%s.svc.%s", namespacedName, dnsDomain),
	}

	altNames := certutil.AltNames{}
	for _, ipStr := range ips {
		ip := net.ParseIP(ipStr)
		if ip != nil {
			altNames.IPs = append(altNames.IPs, ip)
		}
	}
	altNames.DNSNames = append(altNames.DNSNames, hostnames...)
	altNames.DNSNames = append(altNames.DNSNames, internalAPIServerFQDN...)

	config := certutil.Config{
		CommonName: commonName,
		AltNames:   altNames,
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key)
	if err != nil {
		return nil, fmt.Errorf("unable to sign the server certificate: %v", err)
	}

	return &KeyPair{
		Key:  key,
		Cert: cert,
	}, nil
}

func NewClientKeyPair(ca *KeyPair, commonName string, organizations []string) (*KeyPair, error) {
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return nil, fmt.Errorf("unable to create a client private key: %v", err)
	}

	config := certutil.Config{
		CommonName:   commonName,
		Organization: organizations,
		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key)
	if err != nil {
		return nil, fmt.Errorf("unable to sign the client certificate: %v", err)
	}

	return &KeyPair{
		Key:  key,
		Cert: cert,
	}, nil
}
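A short usage sketch, not part of the original commit, showing how the triple package issues a CA and a server key pair. The import path k8s.io/client-go/pkg/util/cert/triple is an assumption inferred from the certutil import above, and the CA/service names are arbitrary examples.

package main

import (
	"log"

	"k8s.io/client-go/pkg/util/cert/triple"
)

func main() {
	ca, err := triple.NewCA("example-ca") // CA CommonName is an arbitrary example
	if err != nil {
		log.Fatal(err)
	}
	// Server cert for a hypothetical service "webhook" in namespace "default".
	server, err := triple.NewServerKeyPair(ca, "webhook.default.svc", "webhook", "default", "cluster.local", nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// PEM-encode with certutil.EncodeCertPEM / EncodePrivateKeyPEM as needed.
	log.Println("issued server certificate signed by", ca.Cert.Subject.CommonName, "for", server.Cert.Subject.CommonName)
}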
@@ -1,218 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clock

import (
	"sync"
	"time"
)

// Clock allows for injecting fake or real clocks into code that
// needs to do arbitrary things based on time.
type Clock interface {
	Now() time.Time
	Since(time.Time) time.Duration
	After(d time.Duration) <-chan time.Time
	Sleep(d time.Duration)
	Tick(d time.Duration) <-chan time.Time
}

var (
	_ = Clock(RealClock{})
	_ = Clock(&FakeClock{})
	_ = Clock(&IntervalClock{})
)

// RealClock really calls time.Now()
type RealClock struct{}

// Now returns the current time.
func (RealClock) Now() time.Time {
	return time.Now()
}

// Since returns time since the specified timestamp.
func (RealClock) Since(ts time.Time) time.Duration {
	return time.Since(ts)
}

// Same as time.After(d).
func (RealClock) After(d time.Duration) <-chan time.Time {
	return time.After(d)
}

func (RealClock) Tick(d time.Duration) <-chan time.Time {
	return time.Tick(d)
}

func (RealClock) Sleep(d time.Duration) {
	time.Sleep(d)
}

// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
	lock sync.RWMutex
	time time.Time

	// waiters are waiting for the fake time to pass their specified time
	waiters []fakeClockWaiter
}

type fakeClockWaiter struct {
	targetTime    time.Time
	stepInterval  time.Duration
	skipIfBlocked bool
	destChan      chan<- time.Time
}

func NewFakeClock(t time.Time) *FakeClock {
	return &FakeClock{
		time: t,
	}
}

// Now returns f's time.
func (f *FakeClock) Now() time.Time {
	f.lock.RLock()
	defer f.lock.RUnlock()
	return f.time
}

// Since returns time since the time in f.
func (f *FakeClock) Since(ts time.Time) time.Duration {
	f.lock.RLock()
	defer f.lock.RUnlock()
	return f.time.Sub(ts)
}

// Fake version of time.After(d).
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
	f.lock.Lock()
	defer f.lock.Unlock()
	stopTime := f.time.Add(d)
	ch := make(chan time.Time, 1) // Don't block!
	f.waiters = append(f.waiters, fakeClockWaiter{
		targetTime: stopTime,
		destChan:   ch,
	})
	return ch
}

func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
	f.lock.Lock()
	defer f.lock.Unlock()
	tickTime := f.time.Add(d)
	ch := make(chan time.Time, 1) // hold one tick
	f.waiters = append(f.waiters, fakeClockWaiter{
		targetTime:    tickTime,
		stepInterval:  d,
		skipIfBlocked: true,
		destChan:      ch,
	})

	return ch
}

// Move clock by Duration, notify anyone that's called After or Tick
func (f *FakeClock) Step(d time.Duration) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.setTimeLocked(f.time.Add(d))
}

// Sets the time.
func (f *FakeClock) SetTime(t time.Time) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.setTimeLocked(t)
}

// Actually changes the time and checks any waiters. f must be write-locked.
func (f *FakeClock) setTimeLocked(t time.Time) {
	f.time = t
	newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
	for i := range f.waiters {
		w := &f.waiters[i]
		if !w.targetTime.After(t) {

			if w.skipIfBlocked {
				select {
				case w.destChan <- t:
				default:
				}
			} else {
				w.destChan <- t
			}

			if w.stepInterval > 0 {
				for !w.targetTime.After(t) {
					w.targetTime = w.targetTime.Add(w.stepInterval)
				}
				newWaiters = append(newWaiters, *w)
			}

		} else {
			newWaiters = append(newWaiters, f.waiters[i])
		}
	}
	f.waiters = newWaiters
}

// Returns true if After has been called on f but not yet satisfied (so you can
// write race-free tests).
func (f *FakeClock) HasWaiters() bool {
	f.lock.RLock()
	defer f.lock.RUnlock()
	return len(f.waiters) > 0
}

func (f *FakeClock) Sleep(d time.Duration) {
	f.Step(d)
}

// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
type IntervalClock struct {
	Time     time.Time
	Duration time.Duration
}

// Now returns i's time.
func (i *IntervalClock) Now() time.Time {
	i.Time = i.Time.Add(i.Duration)
	return i.Time
}

// Since returns time since the time in i.
func (i *IntervalClock) Since(ts time.Time) time.Duration {
	return i.Time.Sub(ts)
}

// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) After(d time.Duration) <-chan time.Time {
	panic("IntervalClock doesn't implement After")
}

// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
	panic("IntervalClock doesn't implement Tick")
}

func (*IntervalClock) Sleep(d time.Duration) {
	panic("IntervalClock doesn't implement Sleep")
}
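A small sketch, not from the original commit, of the injection pattern the Clock interface is designed for: code depends only on clock.Clock, so a FakeClock can drive it deterministically while RealClock is used in production. Package path assumed from this repository's layout; the helper function is hypothetical.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/clock"
)

// waitOrTimeout is a toy function that only sees the Clock interface.
func waitOrTimeout(c clock.Clock, d time.Duration) <-chan time.Time {
	return c.After(d)
}

func main() {
	fc := clock.NewFakeClock(time.Now())
	ch := waitOrTimeout(fc, time.Minute)
	fc.Step(time.Minute) // advance fake time; the waiter fires without sleeping
	fmt.Println("fired at", (<-ch).Format(time.RFC3339))
}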
@@ -1,184 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clock

import (
	"testing"
	"time"
)

func TestFakeClock(t *testing.T) {
	startTime := time.Now()
	tc := NewFakeClock(startTime)
	tc.Step(time.Second)
	now := tc.Now()
	if now.Sub(startTime) != time.Second {
		t.Errorf("input: %s now=%s gap=%s expected=%s", startTime, now, now.Sub(startTime), time.Second)
	}

	tt := tc.Now()
	tc.SetTime(tt.Add(time.Hour))
	if tc.Now().Sub(tt) != time.Hour {
		t.Errorf("input: %s now=%s gap=%s expected=%s", tt, tc.Now(), tc.Now().Sub(tt), time.Hour)
	}
}

func TestFakeClockSleep(t *testing.T) {
	startTime := time.Now()
	tc := NewFakeClock(startTime)
	tc.Sleep(time.Duration(1) * time.Hour)
	now := tc.Now()
	if now.Sub(startTime) != time.Hour {
		t.Errorf("Fake sleep failed, expected time to advance by one hour, instead, its %v", now.Sub(startTime))
	}
}

func TestFakeAfter(t *testing.T) {
	tc := NewFakeClock(time.Now())
	if tc.HasWaiters() {
		t.Errorf("unexpected waiter?")
	}
	oneSec := tc.After(time.Second)
	if !tc.HasWaiters() {
		t.Errorf("unexpected lack of waiter?")
	}

	oneOhOneSec := tc.After(time.Second + time.Millisecond)
	twoSec := tc.After(2 * time.Second)
	select {
	case <-oneSec:
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
	}

	tc.Step(999 * time.Millisecond)
	select {
	case <-oneSec:
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
	}

	tc.Step(time.Millisecond)
	select {
	case <-oneSec:
		// Expected!
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
		t.Errorf("unexpected non-channel read")
	}
	tc.Step(time.Millisecond)
	select {
	case <-oneSec:
		// should not double-trigger!
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		// Expected!
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
		t.Errorf("unexpected non-channel read")
	}
}

func TestFakeTick(t *testing.T) {
	tc := NewFakeClock(time.Now())
	if tc.HasWaiters() {
		t.Errorf("unexpected waiter?")
	}
	oneSec := tc.Tick(time.Second)
	if !tc.HasWaiters() {
		t.Errorf("unexpected lack of waiter?")
	}

	oneOhOneSec := tc.Tick(time.Second + time.Millisecond)
	twoSec := tc.Tick(2 * time.Second)
	select {
	case <-oneSec:
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
	}

	tc.Step(999 * time.Millisecond) // t=.999
	select {
	case <-oneSec:
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
	}

	tc.Step(time.Millisecond) // t=1.000
	select {
	case <-oneSec:
		// Expected!
	case <-oneOhOneSec:
		t.Errorf("unexpected channel read")
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
		t.Errorf("unexpected non-channel read")
	}
	tc.Step(time.Millisecond) // t=1.001
	select {
	case <-oneSec:
		// should not double-trigger!
		t.Errorf("unexpected channel read")
	case <-oneOhOneSec:
		// Expected!
	case <-twoSec:
		t.Errorf("unexpected channel read")
	default:
		t.Errorf("unexpected non-channel read")
	}

	tc.Step(time.Second) // t=2.001
	tc.Step(time.Second) // t=3.001
	tc.Step(time.Second) // t=4.001
	tc.Step(time.Second) // t=5.001

	// The one second ticker should not accumulate ticks
	accumulatedTicks := 0
	drained := false
	for !drained {
		select {
		case <-oneSec:
			accumulatedTicks++
		default:
			drained = true
		}
	}
	if accumulatedTicks != 1 {
		t.Errorf("unexpected number of accumulated ticks: %d", accumulatedTicks)
	}
}
@@ -1,149 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"sync"
	"time"

	"k8s.io/client-go/pkg/util/clock"
	"k8s.io/client-go/pkg/util/integer"
)

type backoffEntry struct {
	backoff    time.Duration
	lastUpdate time.Time
}

type Backoff struct {
	sync.Mutex
	Clock           clock.Clock
	defaultDuration time.Duration
	maxDuration     time.Duration
	perItemBackoff  map[string]*backoffEntry
}

func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
	return &Backoff{
		perItemBackoff:  map[string]*backoffEntry{},
		Clock:           tc,
		defaultDuration: initial,
		maxDuration:     max,
	}
}

func NewBackOff(initial, max time.Duration) *Backoff {
	return &Backoff{
		perItemBackoff:  map[string]*backoffEntry{},
		Clock:           clock.RealClock{},
		defaultDuration: initial,
		maxDuration:     max,
	}
}

// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
	p.Lock()
	defer p.Unlock()
	var delay time.Duration
	entry, ok := p.perItemBackoff[id]
	if ok {
		delay = entry.backoff
	}
	return delay
}

// move backoff to the next mark, capping at maxDuration
func (p *Backoff) Next(id string, eventTime time.Time) {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		entry = p.initEntryUnsafe(id)
	} else {
		delay := entry.backoff * 2 // exponential
		entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
	}
	entry.lastUpdate = p.Clock.Now()
}

// Reset forces clearing of all backoff data for a given key.
func (p *Backoff) Reset(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Returns True if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return p.Clock.Now().Sub(eventTime) < entry.backoff
}

// Returns True if time since lastupdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return eventTime.Sub(entry.lastUpdate) < entry.backoff
}

// Garbage collect records that have aged past maxDuration. Backoff users are expected
// to invoke this periodically.
func (p *Backoff) GC() {
	p.Lock()
	defer p.Unlock()
	now := p.Clock.Now()
	for id, entry := range p.perItemBackoff {
		if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
			// GC when entry has not been updated for 2*maxDuration
			delete(p.perItemBackoff, id)
		}
	}
}

func (p *Backoff) DeleteEntry(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Take a lock on *Backoff, before calling initEntryUnsafe
func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
	entry := &backoffEntry{backoff: p.defaultDuration}
	p.perItemBackoff[id] = entry
	return entry
}

// After 2*maxDuration we restart the backoff factor to the beginning
func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
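A hedged sketch of the per-item exponential backoff pattern above, not part of the original commit: each key gets its own delay that doubles on every Next call, capped at maxDuration, and expires after 2*maxDuration of inactivity. The item key and loop are illustrative; the import path is assumed from this repository's layout.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/pkg/util/flowcontrol"
)

func main() {
	// 1s initial delay, capped at 30s.
	backoff := flowcontrol.NewBackOff(time.Second, 30*time.Second)
	id := "pod-xyz" // arbitrary per-item key

	for i := 0; i < 4; i++ {
		now := backoff.Clock.Now()
		if backoff.IsInBackOffSince(id, now) {
			fmt.Println("still backing off, current delay =", backoff.Get(id))
			continue
		}
		// ... attempt the operation here ...
		backoff.Next(id, now) // record a failure; doubles the delay up to the cap
	}
}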
@@ -1,195 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"testing"
	"time"

	"k8s.io/client-go/pkg/util/clock"
)

func TestSlowBackoff(t *testing.T) {
	id := "_idSlow"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 50 * step

	b := NewFakeBackOff(step, maxDuration, tc)
	cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50}
	for ix, c := range cases {
		tc.Step(step)
		w := b.Get(id)
		if w != c*step {
			t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w)
		}
		b.Next(id, tc.Now())
	}

	// Now confirm that the Reset cancels backoff.
	b.Next(id, tc.Now())
	b.Reset(id)
	if b.Get(id) != 0 {
		t.Errorf("Reset didn't clear the backoff.")
	}

}

func TestBackoffReset(t *testing.T) {
	id := "_idReset"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := step * 5
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff should be capped at maxDuration
	if !b.IsInBackOffSince(id, tc.Now()) {
		t.Errorf("expected to be in Backoff got %s", b.Get(id))
	}

	lastUpdate := tc.Now()
	tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration
	if b.IsInBackOffSince(id, lastUpdate) {
		t.Errorf("expected to not be in Backoff after reset (start=%s, now=%s, lastUpdate=%s), got %s", startTime, tc.Now(), lastUpdate, b.Get(id))
	}
}

func TestBackoffHightWaterMark(t *testing.T) {
	id := "_idHiWaterMark"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step
	b := NewFakeBackOff(step, maxDuration, tc)

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff high watermark expires after 2*maxDuration
	tc.Step(maxDuration + step)
	b.Next(id, tc.Now())

	if b.Get(id) != maxDuration {
		t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id))
	}
}

func TestBackoffGC(t *testing.T) {
	id := "_idGC"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step

	b := NewFakeBackOff(step, maxDuration, tc)

	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}
	lastUpdate := tc.Now()
	tc.Step(maxDuration + step)
	b.GC()
	_, found := b.perItemBackoff[id]
	if !found {
		t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration)
	}

	tc.Step(maxDuration + step)
	b.GC()
	r, found := b.perItemBackoff[id]
	if found {
		t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r)
	}
}

func TestIsInBackOffSinceUpdate(t *testing.T) {
	id := "_idIsInBackOffSinceUpdate"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 10 * step
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	cases := []struct {
		tick      time.Duration
		inBackOff bool
		value     int
	}{
		{tick: 0, inBackOff: false, value: 0},
		{tick: 1, inBackOff: false, value: 1},
		{tick: 2, inBackOff: true, value: 2},
		{tick: 3, inBackOff: false, value: 2},
		{tick: 4, inBackOff: true, value: 4},
		{tick: 5, inBackOff: true, value: 4},
		{tick: 6, inBackOff: true, value: 4},
		{tick: 7, inBackOff: false, value: 4},
		{tick: 8, inBackOff: true, value: 8},
		{tick: 9, inBackOff: true, value: 8},
		{tick: 10, inBackOff: true, value: 8},
		{tick: 11, inBackOff: true, value: 8},
		{tick: 12, inBackOff: true, value: 8},
		{tick: 13, inBackOff: true, value: 8},
		{tick: 14, inBackOff: true, value: 8},
		{tick: 15, inBackOff: false, value: 8},
		{tick: 16, inBackOff: true, value: 10},
		{tick: 17, inBackOff: true, value: 10},
		{tick: 18, inBackOff: true, value: 10},
		{tick: 19, inBackOff: true, value: 10},
		{tick: 20, inBackOff: true, value: 10},
		{tick: 21, inBackOff: true, value: 10},
		{tick: 22, inBackOff: true, value: 10},
		{tick: 23, inBackOff: true, value: 10},
		{tick: 24, inBackOff: true, value: 10},
		{tick: 25, inBackOff: false, value: 10},
		{tick: 26, inBackOff: true, value: 10},
		{tick: 27, inBackOff: true, value: 10},
		{tick: 28, inBackOff: true, value: 10},
		{tick: 29, inBackOff: true, value: 10},
		{tick: 30, inBackOff: true, value: 10},
		{tick: 31, inBackOff: true, value: 10},
		{tick: 32, inBackOff: true, value: 10},
		{tick: 33, inBackOff: true, value: 10},
		{tick: 34, inBackOff: true, value: 10},
		{tick: 35, inBackOff: false, value: 10},
		{tick: 56, inBackOff: false, value: 0},
		{tick: 57, inBackOff: false, value: 1},
	}

	for _, c := range cases {
		tc.SetTime(startTime.Add(c.tick * step))
		if c.inBackOff != b.IsInBackOffSinceUpdate(id, tc.Now()) {
			t.Errorf("expected IsInBackOffSinceUpdate %v got %v at tick %s", c.inBackOff, b.IsInBackOffSinceUpdate(id, tc.Now()), c.tick*step)
		}

		if c.inBackOff && (time.Duration(c.value)*step != b.Get(id)) {
			t.Errorf("expected backoff value=%s got %s at tick %s", time.Duration(c.value)*step, b.Get(id), c.tick*step)
		}

		if !c.inBackOff {
			b.Next(id, tc.Now())
		}
	}
}
@@ -1,132 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"sync"

	"github.com/juju/ratelimit"
)

type RateLimiter interface {
	// TryAccept returns true if a token is taken immediately. Otherwise,
	// it returns false.
	TryAccept() bool
	// Accept returns once a token becomes available.
	Accept()
	// Stop stops the rate limiter, subsequent calls to CanAccept will return false
	Stop()
	// Saturation returns a percentage number which describes how saturated
	// this rate limiter is.
	// Usually we use token bucket rate limiter. In that case,
	// 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use.
	Saturation() float64
	// QPS returns QPS of this rate limiter
	QPS() float32
}

type tokenBucketRateLimiter struct {
	limiter *ratelimit.Bucket
	qps     float32
}

// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
	limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst))
	return &tokenBucketRateLimiter{
		limiter: limiter,
		qps:     qps,
	}
}

func (t *tokenBucketRateLimiter) TryAccept() bool {
	return t.limiter.TakeAvailable(1) == 1
}

func (t *tokenBucketRateLimiter) Saturation() float64 {
	capacity := t.limiter.Capacity()
	avail := t.limiter.Available()
	return float64(capacity-avail) / float64(capacity)
}

// Accept will block until a token becomes available
func (t *tokenBucketRateLimiter) Accept() {
	t.limiter.Wait(1)
}

func (t *tokenBucketRateLimiter) Stop() {
}

func (t *tokenBucketRateLimiter) QPS() float32 {
	return t.qps
}

type fakeAlwaysRateLimiter struct{}

func NewFakeAlwaysRateLimiter() RateLimiter {
	return &fakeAlwaysRateLimiter{}
}

func (t *fakeAlwaysRateLimiter) TryAccept() bool {
	return true
}

func (t *fakeAlwaysRateLimiter) Saturation() float64 {
	return 0
}

func (t *fakeAlwaysRateLimiter) Stop() {}

func (t *fakeAlwaysRateLimiter) Accept() {}

func (t *fakeAlwaysRateLimiter) QPS() float32 {
	return 1
}

type fakeNeverRateLimiter struct {
	wg sync.WaitGroup
}

func NewFakeNeverRateLimiter() RateLimiter {
	rl := fakeNeverRateLimiter{}
	rl.wg.Add(1)
	return &rl
}

func (t *fakeNeverRateLimiter) TryAccept() bool {
	return false
}

func (t *fakeNeverRateLimiter) Saturation() float64 {
	return 1
}

func (t *fakeNeverRateLimiter) Stop() {
	t.wg.Done()
}

func (t *fakeNeverRateLimiter) Accept() {
	t.wg.Wait()
}

func (t *fakeNeverRateLimiter) QPS() float32 {
	return 1
}
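A small usage sketch for the token bucket rate limiter, not part of the original commit: the bucket starts with 'burst' tokens and refills at 'qps' per second, so TryAccept drains the burst and Accept blocks until a refill. The qps/burst numbers are arbitrary; the import path is assumed from this repository's layout.

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/util/flowcontrol"
)

func main() {
	// Allow a burst of 10 requests, refilling at 5 tokens per second.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	for i := 0; i < 20; i++ {
		if limiter.TryAccept() {
			fmt.Println("request", i, "sent immediately")
		} else {
			limiter.Accept() // block until a token refills
			fmt.Println("request", i, "sent after waiting; saturation =", limiter.Saturation())
		}
	}
}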
@@ -1,177 +0,0 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package flowcontrol
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMultithreadedThrottling(t *testing.T) {
|
||||
// Bucket with 100QPS and no burst
|
||||
r := NewTokenBucketRateLimiter(100, 1)
|
||||
|
||||
// channel to collect 100 tokens
|
||||
taken := make(chan bool, 100)
|
||||
|
||||
// Set up goroutines to hammer the throttler
|
||||
startCh := make(chan bool)
|
||||
endCh := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
// wait for the starting signal
|
||||
<-startCh
|
||||
for {
|
||||
// get a token
|
||||
r.Accept()
|
||||
select {
|
||||
// try to add it to the taken channel
|
||||
case taken <- true:
|
||||
continue
|
||||
// if taken is full, notify and return
|
||||
default:
|
||||
endCh <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// record wall time
|
||||
startTime := time.Now()
|
||||
// take the initial capacity so all tokens are the result of refill
|
||||
r.Accept()
|
||||
// start the thundering herd
|
||||
close(startCh)
|
||||
// wait for the first signal that we collected 100 tokens
|
||||
<-endCh
|
||||
// record wall time
|
||||
endTime := time.Now()
|
||||
|
||||
// tolerate a 1% clock change because these things happen
|
||||
if duration := endTime.Sub(startTime); duration < (time.Second * 99 / 100) {
|
||||
// We shouldn't be able to get 100 tokens out of the bucket in less than 1 second of wall clock time, no matter what
|
||||
t.Errorf("Expected it to take at least 1 second to get 100 tokens, took %v", duration)
|
||||
} else {
|
||||
t.Logf("Took %v to get 100 tokens", duration)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicThrottle(t *testing.T) {
|
||||
r := NewTokenBucketRateLimiter(1, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
if !r.TryAccept() {
|
||||
t.Error("unexpected false accept")
|
||||
}
|
||||
}
|
||||
if r.TryAccept() {
|
||||
t.Error("unexpected true accept")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementThrottle(t *testing.T) {
|
||||
r := NewTokenBucketRateLimiter(1, 1)
|
||||
if !r.TryAccept() {
|
||||
t.Error("unexpected false accept")
|
||||
}
|
||||
if r.TryAccept() {
|
||||
t.Error("unexpected true accept")
|
||||
}
|
||||
|
||||
// Allow to refill
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
if !r.TryAccept() {
|
||||
t.Error("unexpected false accept")
|
||||
}
|
||||
}
|
||||
|
||||
func TestThrottle(t *testing.T) {
|
||||
r := NewTokenBucketRateLimiter(10, 5)
|
||||
|
||||
// Should consume 5 tokens immediately, then
|
||||
// the remaining 11 should take at least 1 second (0.1s each)
|
||||
expectedFinish := time.Now().Add(time.Second * 1)
|
||||
for i := 0; i < 16; i++ {
|
||||
r.Accept()
|
||||
}
|
||||
if time.Now().Before(expectedFinish) {
|
||||
t.Error("rate limit was not respected, finished too early")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiterSaturation(t *testing.T) {
|
||||
const e = 0.000001
|
||||
tests := []struct {
|
||||
capacity int
|
||||
take int
|
||||
|
||||
expectedSaturation float64
|
||||
}{
|
||||
{1, 1, 1},
|
||||
{10, 3, 0.3},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
rl := NewTokenBucketRateLimiter(1, tt.capacity)
|
||||
for i := 0; i < tt.take; i++ {
|
||||
rl.Accept()
|
||||
}
|
||||
if math.Abs(rl.Saturation()-tt.expectedSaturation) > e {
|
||||
t.Fatalf("#%d: Saturation rate difference isn't within tolerable range\n want=%f, get=%f",
|
||||
i, tt.expectedSaturation, rl.Saturation())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAlwaysFake(t *testing.T) {
|
||||
rl := NewFakeAlwaysRateLimiter()
|
||||
if !rl.TryAccept() {
|
||||
t.Error("TryAccept in AlwaysFake should return true.")
|
||||
}
|
||||
// If this will block the test will timeout
|
||||
rl.Accept()
|
||||
}
|
||||
|
||||
func TestNeverFake(t *testing.T) {
|
||||
rl := NewFakeNeverRateLimiter()
|
||||
if rl.TryAccept() {
|
||||
t.Error("TryAccept in NeverFake should return false.")
|
||||
}
|
||||
|
||||
finished := false
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
rl.Accept()
|
||||
finished = true
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Wait some time to make sure it never finished.
|
||||
time.Sleep(time.Second)
|
||||
if finished {
|
||||
t.Error("Accept should block forever in NeverFake.")
|
||||
}
|
||||
|
||||
rl.Stop()
|
||||
wg.Wait()
|
||||
if !finished {
|
||||
t.Error("Stop should make Accept unblock in NeverFake.")
|
||||
}
|
||||
}
|
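Before the next file, a minimal, hedged sketch of how the token-bucket rate limiter exercised by the tests above is typically driven; the flowcontrol import path is an assumption, and only the constructor and methods the tests already use (NewTokenBucketRateLimiter, TryAccept, Accept) appear.

package main

import (
	"fmt"

	// Assumed import path for the rate limiter package exercised by the tests above.
	"k8s.io/client-go/pkg/util/flowcontrol"
)

func main() {
	// 10 tokens per second with a burst capacity of 5, as in TestThrottle.
	limiter := flowcontrol.NewTokenBucketRateLimiter(10, 5)

	for i := 0; i < 8; i++ {
		if limiter.TryAccept() {
			// A token was immediately available (true for the first 5 calls).
			fmt.Println("request", i, "accepted without waiting")
			continue
		}
		// Block until the bucket refills; at 10 qps this is roughly 0.1s per token.
		limiter.Accept()
		fmt.Println("request", i, "accepted after waiting")
	}
}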
@@ -1,47 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package homedir

import (
	"os"
	"runtime"
)

// HomeDir returns the home directory for the current user
func HomeDir() string {
	if runtime.GOOS == "windows" {

		// First prefer the HOME environment variable
		if home := os.Getenv("HOME"); len(home) > 0 {
			if _, err := os.Stat(home); err == nil {
				return home
			}
		}
		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
			homeDir := homeDrive + homePath
			if _, err := os.Stat(homeDir); err == nil {
				return homeDir
			}
		}
		if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
			if _, err := os.Stat(userProfile); err == nil {
				return userProfile
			}
		}
	}
	return os.Getenv("HOME")
}
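A small, hedged usage sketch of HomeDir as defined above; the import path and the kubeconfig file name are illustrative assumptions.

package main

import (
	"fmt"
	"path/filepath"

	// Assumed import path for the homedir package shown above.
	"k8s.io/client-go/pkg/util/homedir"
)

func main() {
	// Build a default kubeconfig location, falling back to empty if no home is found.
	kubeconfig := ""
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = filepath.Join(home, ".kube", "config")
	}
	fmt.Println("default kubeconfig:", kubeconfig)
}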
@@ -1,67 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integer

// IntMax returns the larger of a and b.
func IntMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// IntMin returns the smaller of a and b.
func IntMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// Int32Max returns the larger of a and b.
func Int32Max(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}

// Int32Min returns the smaller of a and b.
func Int32Min(a, b int32) int32 {
	if b < a {
		return b
	}
	return a
}

// Int64Max returns the larger of a and b.
func Int64Max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}

// Int64Min returns the smaller of a and b.
func Int64Min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}

// RoundToInt32 rounds a float64 to the nearest int32, with halves rounded away from zero.
func RoundToInt32(a float64) int32 {
	if a < 0 {
		return int32(a - 0.5)
	}
	return int32(a + 0.5)
}
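A brief, hedged illustration of the helpers above, mainly the rounding convention RoundToInt32 implements (halves round away from zero), which matches the test table that follows; the import path is an assumption.

package main

import (
	"fmt"

	// Assumed import path for the integer package shown above.
	"k8s.io/client-go/pkg/util/integer"
)

func main() {
	fmt.Println(integer.IntMax(-1, -2))          // -1
	fmt.Println(integer.Int64Min(0, 1))          // 0
	fmt.Println(integer.RoundToInt32(3.49))      // 3  (nearest integer)
	fmt.Println(integer.RoundToInt32(-4.5))      // -5 (halves round away from zero)
	fmt.Println(integer.RoundToInt32(-4.499999)) // -4
}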
@@ -1,244 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package integer
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestIntMax(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int
|
||||
expectedMax int
|
||||
}{
|
||||
{
|
||||
nums: []int{-1, 0},
|
||||
expectedMax: 0,
|
||||
},
|
||||
{
|
||||
nums: []int{-1, -2},
|
||||
expectedMax: -1,
|
||||
},
|
||||
{
|
||||
nums: []int{0, 1},
|
||||
expectedMax: 1,
|
||||
},
|
||||
{
|
||||
nums: []int{1, 2},
|
||||
expectedMax: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if max := IntMax(test.nums[0], test.nums[1]); max != test.expectedMax {
|
||||
t.Errorf("expected %v, got %v", test.expectedMax, max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntMin(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int
|
||||
expectedMin int
|
||||
}{
|
||||
{
|
||||
nums: []int{-1, 0},
|
||||
expectedMin: -1,
|
||||
},
|
||||
{
|
||||
nums: []int{-1, -2},
|
||||
expectedMin: -2,
|
||||
},
|
||||
{
|
||||
nums: []int{0, 1},
|
||||
expectedMin: 0,
|
||||
},
|
||||
{
|
||||
nums: []int{1, 2},
|
||||
expectedMin: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if min := IntMin(test.nums[0], test.nums[1]); min != test.expectedMin {
|
||||
t.Errorf("expected %v, got %v", test.expectedMin, min)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt32Max(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int32
|
||||
expectedMax int32
|
||||
}{
|
||||
{
|
||||
nums: []int32{-1, 0},
|
||||
expectedMax: 0,
|
||||
},
|
||||
{
|
||||
nums: []int32{-1, -2},
|
||||
expectedMax: -1,
|
||||
},
|
||||
{
|
||||
nums: []int32{0, 1},
|
||||
expectedMax: 1,
|
||||
},
|
||||
{
|
||||
nums: []int32{1, 2},
|
||||
expectedMax: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if max := Int32Max(test.nums[0], test.nums[1]); max != test.expectedMax {
|
||||
t.Errorf("expected %v, got %v", test.expectedMax, max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt32Min(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int32
|
||||
expectedMin int32
|
||||
}{
|
||||
{
|
||||
nums: []int32{-1, 0},
|
||||
expectedMin: -1,
|
||||
},
|
||||
{
|
||||
nums: []int32{-1, -2},
|
||||
expectedMin: -2,
|
||||
},
|
||||
{
|
||||
nums: []int32{0, 1},
|
||||
expectedMin: 0,
|
||||
},
|
||||
{
|
||||
nums: []int32{1, 2},
|
||||
expectedMin: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if min := Int32Min(test.nums[0], test.nums[1]); min != test.expectedMin {
|
||||
t.Errorf("expected %v, got %v", test.expectedMin, min)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt64Max(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int64
|
||||
expectedMax int64
|
||||
}{
|
||||
{
|
||||
nums: []int64{-1, 0},
|
||||
expectedMax: 0,
|
||||
},
|
||||
{
|
||||
nums: []int64{-1, -2},
|
||||
expectedMax: -1,
|
||||
},
|
||||
{
|
||||
nums: []int64{0, 1},
|
||||
expectedMax: 1,
|
||||
},
|
||||
{
|
||||
nums: []int64{1, 2},
|
||||
expectedMax: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if max := Int64Max(test.nums[0], test.nums[1]); max != test.expectedMax {
|
||||
t.Errorf("expected %v, got %v", test.expectedMax, max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt64Min(t *testing.T) {
|
||||
tests := []struct {
|
||||
nums []int64
|
||||
expectedMin int64
|
||||
}{
|
||||
{
|
||||
nums: []int64{-1, 0},
|
||||
expectedMin: -1,
|
||||
},
|
||||
{
|
||||
nums: []int64{-1, -2},
|
||||
expectedMin: -2,
|
||||
},
|
||||
{
|
||||
nums: []int64{0, 1},
|
||||
expectedMin: 0,
|
||||
},
|
||||
{
|
||||
nums: []int64{1, 2},
|
||||
expectedMin: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if min := Int64Min(test.nums[0], test.nums[1]); min != test.expectedMin {
|
||||
t.Errorf("expected %v, got %v", test.expectedMin, min)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundToInt32(t *testing.T) {
|
||||
tests := []struct {
|
||||
num float64
|
||||
exp int32
|
||||
}{
|
||||
{
|
||||
num: 5.5,
|
||||
exp: 6,
|
||||
},
|
||||
{
|
||||
num: -3.7,
|
||||
exp: -4,
|
||||
},
|
||||
{
|
||||
num: 3.49,
|
||||
exp: 3,
|
||||
},
|
||||
{
|
||||
num: -7.9,
|
||||
exp: -8,
|
||||
},
|
||||
{
|
||||
num: -4.499999,
|
||||
exp: -4,
|
||||
},
|
||||
{
|
||||
num: 0,
|
||||
exp: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("executing scenario %d", i)
|
||||
if got := RoundToInt32(test.num); got != test.exp {
|
||||
t.Errorf("expected %d, got %d", test.exp, got)
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,20 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package jsonpath is a template engine using jsonpath syntax,
// which can be seen at http://goessner.net/articles/JsonPath/.
// In addition, it has a {range} {end} function to iterate over lists and slices.
package jsonpath // import "k8s.io/client-go/pkg/util/jsonpath"
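A minimal usage sketch of the API defined in the jsonpath.go file below (New, AllowMissingKeys, Parse, Execute); the import path comes from the package clause above, while the data value and template are illustrative.

package main

import (
	"os"

	"k8s.io/client-go/pkg/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "a"},
			map[string]interface{}{"name": "b"},
		},
	}

	jp := jsonpath.New("example").AllowMissingKeys(true)
	if err := jp.Parse(`{range .items[*]}{.name} {end}`); err != nil {
		panic(err)
	}
	// Writes "a b " to stdout: each range iteration prints the name and a space.
	if err := jp.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}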
@@ -1,498 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/pkg/third_party/forked/golang/template"
|
||||
)
|
||||
|
||||
type JSONPath struct {
|
||||
name string
|
||||
parser *Parser
|
||||
stack [][]reflect.Value //push and pop values in different scopes
|
||||
cur []reflect.Value //current scope values
|
||||
beginRange int
|
||||
inRange int
|
||||
endRange int
|
||||
|
||||
allowMissingKeys bool
|
||||
}
|
||||
|
||||
func New(name string) *JSONPath {
|
||||
return &JSONPath{
|
||||
name: name,
|
||||
beginRange: 0,
|
||||
inRange: 0,
|
||||
endRange: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
|
||||
// cannot be located, or simply an empty result. The receiver is returned for chaining.
|
||||
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
|
||||
j.allowMissingKeys = allow
|
||||
return j
|
||||
}
|
||||
|
||||
// Parse parses the given template and returns any error encountered.
|
||||
func (j *JSONPath) Parse(text string) (err error) {
|
||||
j.parser, err = Parse(j.name, text)
|
||||
return
|
||||
}
|
||||
|
||||
// Execute binds data into the template and writes the result.
|
||||
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
|
||||
fullResults, err := j.FindResults(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for ix := range fullResults {
|
||||
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
|
||||
if j.parser == nil {
|
||||
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
|
||||
}
|
||||
|
||||
j.cur = []reflect.Value{reflect.ValueOf(data)}
|
||||
nodes := j.parser.Root.Nodes
|
||||
fullResult := [][]reflect.Value{}
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
node := nodes[i]
|
||||
results, err := j.walk(j.cur, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//encounter an end node, break the current block
|
||||
if j.endRange > 0 && j.endRange <= j.inRange {
|
||||
j.endRange -= 1
|
||||
break
|
||||
}
|
||||
//encounter a range node, start a range loop
|
||||
if j.beginRange > 0 {
|
||||
j.beginRange -= 1
|
||||
j.inRange += 1
|
||||
for k, value := range results {
|
||||
j.parser.Root.Nodes = nodes[i+1:]
|
||||
if k == len(results)-1 {
|
||||
j.inRange -= 1
|
||||
}
|
||||
nextResults, err := j.FindResults(value.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullResult = append(fullResult, nextResults...)
|
||||
}
|
||||
break
|
||||
}
|
||||
fullResult = append(fullResult, results)
|
||||
}
|
||||
return fullResult, nil
|
||||
}
|
||||
|
||||
// PrintResults writes the results to the given writer.
|
||||
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
|
||||
for i, r := range results {
|
||||
text, err := j.evalToText(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i != len(results)-1 {
|
||||
text = append(text, ' ')
|
||||
}
|
||||
if _, err = wr.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// walk visits the tree rooted at the given node in DFS order
|
||||
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
|
||||
switch node := node.(type) {
|
||||
case *ListNode:
|
||||
return j.evalList(value, node)
|
||||
case *TextNode:
|
||||
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
|
||||
case *FieldNode:
|
||||
return j.evalField(value, node)
|
||||
case *ArrayNode:
|
||||
return j.evalArray(value, node)
|
||||
case *FilterNode:
|
||||
return j.evalFilter(value, node)
|
||||
case *IntNode:
|
||||
return j.evalInt(value, node)
|
||||
case *FloatNode:
|
||||
return j.evalFloat(value, node)
|
||||
case *WildcardNode:
|
||||
return j.evalWildcard(value, node)
|
||||
case *RecursiveNode:
|
||||
return j.evalRecursive(value, node)
|
||||
case *UnionNode:
|
||||
return j.evalUnion(value, node)
|
||||
case *IdentifierNode:
|
||||
return j.evalIdentifier(value, node)
|
||||
default:
|
||||
return value, fmt.Errorf("unexpected Node %v", node)
|
||||
}
|
||||
}
|
||||
|
||||
// evalInt evaluates IntNode
|
||||
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
|
||||
result := make([]reflect.Value, len(input))
|
||||
for i := range input {
|
||||
result[i] = reflect.ValueOf(node.Value)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalFloat evaluates FloatNode
|
||||
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
|
||||
result := make([]reflect.Value, len(input))
|
||||
for i := range input {
|
||||
result[i] = reflect.ValueOf(node.Value)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalList evaluates ListNode
|
||||
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
|
||||
var err error
|
||||
curValue := value
|
||||
for _, node := range node.Nodes {
|
||||
curValue, err = j.walk(curValue, node)
|
||||
if err != nil {
|
||||
return curValue, err
|
||||
}
|
||||
}
|
||||
return curValue, nil
|
||||
}
|
||||
|
||||
// evalIdentifier evaluates IdentifierNode
|
||||
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
switch node.Name {
|
||||
case "range":
|
||||
j.stack = append(j.stack, j.cur)
|
||||
j.beginRange += 1
|
||||
results = input
|
||||
case "end":
|
||||
if j.endRange < j.inRange { //inside a loop, break the current block
|
||||
j.endRange += 1
|
||||
break
|
||||
}
|
||||
// the loop is about to end, pop value and continue the following execution
|
||||
if len(j.stack) > 0 {
|
||||
j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
|
||||
} else {
|
||||
return results, fmt.Errorf("not in range, nothing to end")
|
||||
}
|
||||
default:
|
||||
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalArray evaluates ArrayNode
|
||||
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
|
||||
return input, fmt.Errorf("%v is not array or slice", value.Type())
|
||||
}
|
||||
params := node.Params
|
||||
if !params[0].Known {
|
||||
params[0].Value = 0
|
||||
}
|
||||
if params[0].Value < 0 {
|
||||
params[0].Value += value.Len()
|
||||
}
|
||||
if !params[1].Known {
|
||||
params[1].Value = value.Len()
|
||||
}
|
||||
|
||||
if params[1].Value < 0 {
|
||||
params[1].Value += value.Len()
|
||||
}
|
||||
|
||||
sliceLength := value.Len()
|
||||
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
|
||||
if params[0].Value >= sliceLength {
|
||||
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
|
||||
}
|
||||
if params[1].Value > sliceLength {
|
||||
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
|
||||
}
|
||||
}
|
||||
|
||||
if !params[2].Known {
|
||||
value = value.Slice(params[0].Value, params[1].Value)
|
||||
} else {
|
||||
value = value.Slice3(params[0].Value, params[1].Value, params[2].Value)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
result = append(result, value.Index(i))
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalUnion evaluates UnionNode
|
||||
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, listNode := range node.Nodes {
|
||||
temp, err := j.evalList(input, listNode)
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
result = append(result, temp...)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
|
||||
t := value.Type()
|
||||
var inlineValue *reflect.Value
|
||||
for ix := 0; ix < t.NumField(); ix++ {
|
||||
f := t.Field(ix)
|
||||
jsonTag := f.Tag.Get("json")
|
||||
parts := strings.Split(jsonTag, ",")
|
||||
if len(parts) == 0 {
|
||||
continue
|
||||
}
|
||||
if parts[0] == node.Value {
|
||||
return value.Field(ix), nil
|
||||
}
|
||||
if len(parts[0]) == 0 {
|
||||
val := value.Field(ix)
|
||||
inlineValue = &val
|
||||
}
|
||||
}
|
||||
if inlineValue != nil {
|
||||
if inlineValue.Kind() == reflect.Struct {
|
||||
// handle 'inline'
|
||||
match, err := j.findFieldInValue(inlineValue, node)
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
if match.IsValid() {
|
||||
return match, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return value.FieldByName(node.Value), nil
|
||||
}
|
||||
|
||||
// evalField evaluates field of struct or key of map.
|
||||
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
// If there's no input, there's no output
|
||||
if len(input) == 0 {
|
||||
return results, nil
|
||||
}
|
||||
for _, value := range input {
|
||||
var result reflect.Value
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
if value.Kind() == reflect.Struct {
|
||||
var err error
|
||||
if result, err = j.findFieldInValue(&value, node); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if value.Kind() == reflect.Map {
|
||||
mapKeyType := value.Type().Key()
|
||||
nodeValue := reflect.ValueOf(node.Value)
|
||||
// node value type must be convertible to map key type
|
||||
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
|
||||
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
|
||||
}
|
||||
result = value.MapIndex(nodeValue.Convert(mapKeyType))
|
||||
}
|
||||
if result.IsValid() {
|
||||
results = append(results, result)
|
||||
}
|
||||
}
|
||||
if len(results) == 0 {
|
||||
if j.allowMissingKeys {
|
||||
return results, nil
|
||||
}
|
||||
return results, fmt.Errorf("%s is not found", node.Value)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalWildcard extracts all contents of the given value
|
||||
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
kind := value.Kind()
|
||||
if kind == reflect.Struct {
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
results = append(results, value.Field(i))
|
||||
}
|
||||
} else if kind == reflect.Map {
|
||||
for _, key := range value.MapKeys() {
|
||||
results = append(results, value.MapIndex(key))
|
||||
}
|
||||
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalRecursive visits the given value recursively and pushes all visited values to the result
|
||||
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
results := []reflect.Value{}
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
kind := value.Kind()
|
||||
if kind == reflect.Struct {
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
results = append(results, value.Field(i))
|
||||
}
|
||||
} else if kind == reflect.Map {
|
||||
for _, key := range value.MapKeys() {
|
||||
results = append(results, value.MapIndex(key))
|
||||
}
|
||||
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
if len(results) != 0 {
|
||||
result = append(result, value)
|
||||
output, err := j.evalRecursive(results, node)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
result = append(result, output...)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalFilter filters an array according to the FilterNode
|
||||
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
value, _ = template.Indirect(value)
|
||||
|
||||
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
|
||||
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
temp := []reflect.Value{value.Index(i)}
|
||||
lefts, err := j.evalList(temp, node.Left)
|
||||
|
||||
//case exists
|
||||
if node.Operator == "exists" {
|
||||
if len(lefts) > 0 {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
|
||||
var left, right interface{}
|
||||
if len(lefts) != 1 {
|
||||
return input, fmt.Errorf("can only compare one element at a time")
|
||||
}
|
||||
left = lefts[0].Interface()
|
||||
|
||||
rights, err := j.evalList(temp, node.Right)
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
if len(rights) != 1 {
|
||||
return input, fmt.Errorf("can only compare one element at a time")
|
||||
}
|
||||
right = rights[0].Interface()
|
||||
|
||||
pass := false
|
||||
switch node.Operator {
|
||||
case "<":
|
||||
pass, err = template.Less(left, right)
|
||||
case ">":
|
||||
pass, err = template.Greater(left, right)
|
||||
case "==":
|
||||
pass, err = template.Equal(left, right)
|
||||
case "!=":
|
||||
pass, err = template.NotEqual(left, right)
|
||||
case "<=":
|
||||
pass, err = template.LessEqual(left, right)
|
||||
case ">=":
|
||||
pass, err = template.GreaterEqual(left, right)
|
||||
default:
|
||||
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
|
||||
}
|
||||
if err != nil {
|
||||
return results, err
|
||||
}
|
||||
if pass {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalToText translates a reflect value to its corresponding text
|
||||
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
|
||||
iface, ok := template.PrintableValue(v)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can't print type %s", v.Type())
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
fmt.Fprint(&buffer, iface)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
@@ -1,282 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type jsonpathTest struct {
|
||||
name string
|
||||
template string
|
||||
input interface{}
|
||||
expect string
|
||||
}
|
||||
|
||||
func testJSONPath(tests []jsonpathTest, allowMissingKeys bool, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
j.AllowMissingKeys(allowMissingKeys)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, execute error %v", test.name, err)
|
||||
}
|
||||
out := buf.String()
|
||||
if out != test.expect {
|
||||
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// testJSONPathSortOutput runs test cases whose results involve maps and may print in random order, so outputs are sorted before comparison
|
||||
func testJSONPathSortOutput(tests []jsonpathTest, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, execute error %v", test.name, err)
|
||||
}
|
||||
out := buf.String()
|
||||
// since maps are visited in random order, we need to sort the results.
|
||||
sortedOut := strings.Fields(out)
|
||||
sort.Strings(sortedOut)
|
||||
sortedExpect := strings.Fields(test.expect)
|
||||
sort.Strings(sortedExpect)
|
||||
if !reflect.DeepEqual(sortedOut, sortedExpect) {
|
||||
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testFailJSONPath(tests []jsonpathTest, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
var out string
|
||||
if err == nil {
|
||||
out = "nil"
|
||||
} else {
|
||||
out = err.Error()
|
||||
}
|
||||
if out != test.expect {
|
||||
t.Errorf("in %s, expect to get error %q, got %q", test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type book struct {
|
||||
Category string
|
||||
Author string
|
||||
Title string
|
||||
Price float32
|
||||
}
|
||||
|
||||
func (b book) String() string {
|
||||
return fmt.Sprintf("{Category: %s, Author: %s, Title: %s, Price: %v}", b.Category, b.Author, b.Title, b.Price)
|
||||
}
|
||||
|
||||
type bicycle struct {
|
||||
Color string
|
||||
Price float32
|
||||
}
|
||||
|
||||
type empName string
|
||||
type job string
|
||||
type store struct {
|
||||
Book []book
|
||||
Bicycle bicycle
|
||||
Name string
|
||||
Labels map[string]int
|
||||
Employees map[empName]job
|
||||
}
|
||||
|
||||
func TestStructInput(t *testing.T) {
|
||||
|
||||
storeData := store{
|
||||
Name: "jsonpath",
|
||||
Book: []book{
|
||||
{"reference", "Nigel Rees", "Sayings of the Centurey", 8.95},
|
||||
{"fiction", "Evelyn Waugh", "Sword of Honour", 12.99},
|
||||
{"fiction", "Herman Melville", "Moby Dick", 8.99},
|
||||
},
|
||||
Bicycle: bicycle{"red", 19.95},
|
||||
Labels: map[string]int{
|
||||
"engieer": 10,
|
||||
"web/html": 15,
|
||||
"k8s-app": 20,
|
||||
},
|
||||
Employees: map[empName]job{
|
||||
"jason": "manager",
|
||||
"dan": "clerk",
|
||||
},
|
||||
}
|
||||
|
||||
storeTests := []jsonpathTest{
|
||||
{"plain", "hello jsonpath", nil, "hello jsonpath"},
|
||||
{"recursive", "{..}", []int{1, 2, 3}, "[1 2 3]"},
|
||||
{"filter", "{[?(@<5)]}", []int{2, 6, 3, 7}, "2 3"},
|
||||
{"quote", `{"{"}`, nil, "{"},
|
||||
{"union", "{[1,3,4]}", []int{0, 1, 2, 3, 4}, "1 3 4"},
|
||||
{"array", "{[0:2]}", []string{"Monday", "Tudesday"}, "Monday Tudesday"},
|
||||
{"variable", "hello {.Name}", storeData, "hello jsonpath"},
|
||||
{"dict/", "{$.Labels.web/html}", storeData, "15"},
|
||||
{"dict/", "{$.Employees.jason}", storeData, "manager"},
|
||||
{"dict/", "{$.Employees.dan}", storeData, "clerk"},
|
||||
{"dict-", "{.Labels.k8s-app}", storeData, "20"},
|
||||
{"nest", "{.Bicycle.Color}", storeData, "red"},
|
||||
{"allarray", "{.Book[*].Author}", storeData, "Nigel Rees Evelyn Waugh Herman Melville"},
|
||||
{"allfileds", "{.Bicycle.*}", storeData, "red 19.95"},
|
||||
{"recurfileds", "{..Price}", storeData, "8.95 12.99 8.99 19.95"},
|
||||
{"lastarray", "{.Book[-1:]}", storeData,
|
||||
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
|
||||
{"recurarray", "{..Book[2]}", storeData,
|
||||
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
|
||||
}
|
||||
testJSONPath(storeTests, false, t)
|
||||
|
||||
missingKeyTests := []jsonpathTest{
|
||||
{"nonexistent field", "{.hello}", storeData, ""},
|
||||
}
|
||||
testJSONPath(missingKeyTests, true, t)
|
||||
|
||||
failStoreTests := []jsonpathTest{
|
||||
{"invalid identifier", "{hello}", storeData, "unrecognized identifier hello"},
|
||||
{"nonexistent field", "{.hello}", storeData, "hello is not found"},
|
||||
{"invalid array", "{.Labels[0]}", storeData, "map[string]int is not array or slice"},
|
||||
{"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>"},
|
||||
{"redundent end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end"},
|
||||
}
|
||||
testFailJSONPath(failStoreTests, t)
|
||||
}
|
||||
|
||||
func TestJSONInput(t *testing.T) {
|
||||
var pointsJSON = []byte(`[
|
||||
{"id": "i1", "x":4, "y":-5},
|
||||
{"id": "i2", "x":-2, "y":-5, "z":1},
|
||||
{"id": "i3", "x": 8, "y": 3 },
|
||||
{"id": "i4", "x": -6, "y": -1 },
|
||||
{"id": "i5", "x": 0, "y": 2, "z": 1 },
|
||||
{"id": "i6", "x": 1, "y": 4 }
|
||||
]`)
|
||||
var pointsData interface{}
|
||||
err := json.Unmarshal(pointsJSON, &pointsData)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
pointsTests := []jsonpathTest{
|
||||
{"exists filter", "{[?(@.z)].id}", pointsData, "i2 i5"},
|
||||
{"bracket key", "{[0]['id']}", pointsData, "i1"},
|
||||
}
|
||||
testJSONPath(pointsTests, false, t)
|
||||
}
|
||||
|
||||
// TestKubernetes tests some use cases from Kubernetes
|
||||
func TestKubernetes(t *testing.T) {
|
||||
var input = []byte(`{
|
||||
"kind": "List",
|
||||
"items":[
|
||||
{
|
||||
"kind":"None",
|
||||
"metadata":{
|
||||
"name":"127.0.0.1",
|
||||
"labels":{
|
||||
"kubernetes.io/hostname":"127.0.0.1"
|
||||
}
|
||||
},
|
||||
"status":{
|
||||
"capacity":{"cpu":"4"},
|
||||
"addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}]
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind":"None",
|
||||
"metadata":{
|
||||
"name":"127.0.0.2",
|
||||
"labels":{
|
||||
"kubernetes.io/hostname":"127.0.0.2"
|
||||
}
|
||||
},
|
||||
"status":{
|
||||
"capacity":{"cpu":"8"},
|
||||
"addresses":[
|
||||
{"type": "LegacyHostIP", "address":"127.0.0.2"},
|
||||
{"type": "another", "address":"127.0.0.3"}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"users":[
|
||||
{
|
||||
"name": "myself",
|
||||
"user": {}
|
||||
},
|
||||
{
|
||||
"name": "e2e",
|
||||
"user": {"username": "admin", "password": "secret"}
|
||||
}
|
||||
]
|
||||
}`)
|
||||
var nodesData interface{}
|
||||
err := json.Unmarshal(input, &nodesData)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
nodesTests := []jsonpathTest{
|
||||
{"range item", `{range .items[*]}{.metadata.name}, {end}{.kind}`, nodesData, "127.0.0.1, 127.0.0.2, List"},
|
||||
{"range item with quote", `{range .items[*]}{.metadata.name}{"\t"}{end}`, nodesData, "127.0.0.1\t127.0.0.2\t"},
|
||||
{"range addresss", `{.items[*].status.addresses[*].address}`, nodesData,
|
||||
"127.0.0.1 127.0.0.2 127.0.0.3"},
|
||||
{"double range", `{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}`, nodesData,
|
||||
"127.0.0.1, 127.0.0.2, 127.0.0.3, "},
|
||||
{"item name", `{.items[*].metadata.name}`, nodesData, "127.0.0.1 127.0.0.2"},
|
||||
{"union nodes capacity", `{.items[*]['metadata.name', 'status.capacity']}`, nodesData,
|
||||
"127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]"},
|
||||
{"range nodes capacity", `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}`, nodesData,
|
||||
"[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]] "},
|
||||
{"user password", `{.users[?(@.name=="e2e")].user.password}`, &nodesData, "secret"},
|
||||
{"hostname", `{.items[0].metadata.labels.kubernetes\.io/hostname}`, &nodesData, "127.0.0.1"},
|
||||
{"hostname filter", `{.items[?(@.metadata.labels.kubernetes\.io/hostname=="127.0.0.1")].kind}`, &nodesData, "None"},
|
||||
}
|
||||
testJSONPath(nodesTests, false, t)
|
||||
|
||||
randomPrintOrderTests := []jsonpathTest{
|
||||
{"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`},
|
||||
}
|
||||
testJSONPathSortOutput(randomPrintOrderTests, t)
|
||||
}
|
@@ -1,239 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import "fmt"
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
func (t NodeType) String() string {
|
||||
return NodeTypeName[t]
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota
|
||||
NodeArray
|
||||
NodeList
|
||||
NodeField
|
||||
NodeIdentifier
|
||||
NodeFilter
|
||||
NodeInt
|
||||
NodeFloat
|
||||
NodeWildcard
|
||||
NodeRecursive
|
||||
NodeUnion
|
||||
)
|
||||
|
||||
var NodeTypeName = map[NodeType]string{
|
||||
NodeText: "NodeText",
|
||||
NodeArray: "NodeArray",
|
||||
NodeList: "NodeList",
|
||||
NodeField: "NodeField",
|
||||
NodeIdentifier: "NodeIdentifier",
|
||||
NodeFilter: "NodeFilter",
|
||||
NodeInt: "NodeInt",
|
||||
NodeFloat: "NodeFloat",
|
||||
NodeWildcard: "NodeWildcard",
|
||||
NodeRecursive: "NodeRecursive",
|
||||
NodeUnion: "NodeUnion",
|
||||
}
|
||||
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
}
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func newList() *ListNode {
|
||||
return &ListNode{NodeType: NodeList}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
return fmt.Sprintf("%s", l.Type())
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Text string // The text; may span newlines.
|
||||
}
|
||||
|
||||
func newText(text string) *TextNode {
|
||||
return &TextNode{NodeType: NodeText, Text: text}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
|
||||
}
|
||||
|
||||
// FieldNode holds field of struct
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Value string
|
||||
}
|
||||
|
||||
func newField(value string) *FieldNode {
|
||||
return &FieldNode{NodeType: NodeField, Value: value}
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Name string
|
||||
}
|
||||
|
||||
func newIdentifier(value string) *IdentifierNode {
|
||||
return &IdentifierNode{
|
||||
NodeType: NodeIdentifier,
|
||||
Name: value,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *IdentifierNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
|
||||
}
|
||||
|
||||
// ParamsEntry holds param information for ArrayNode
|
||||
type ParamsEntry struct {
|
||||
Value int
|
||||
Known bool // whether the value is known at parse time
|
||||
}
|
||||
|
||||
// ArrayNode holds start, end, step information for array index selection
|
||||
type ArrayNode struct {
|
||||
NodeType
|
||||
Params [3]ParamsEntry //start, end, step
|
||||
}
|
||||
|
||||
func newArray(params [3]ParamsEntry) *ArrayNode {
|
||||
return &ArrayNode{
|
||||
NodeType: NodeArray,
|
||||
Params: params,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ArrayNode) String() string {
|
||||
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
|
||||
}
|
||||
|
||||
// FilterNode holds operand and operator information for filter
|
||||
type FilterNode struct {
|
||||
NodeType
|
||||
Left *ListNode
|
||||
Right *ListNode
|
||||
Operator string
|
||||
}
|
||||
|
||||
func newFilter(left, right *ListNode, operator string) *FilterNode {
|
||||
return &FilterNode{
|
||||
NodeType: NodeFilter,
|
||||
Left: left,
|
||||
Right: right,
|
||||
Operator: operator,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FilterNode) String() string {
|
||||
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
|
||||
}
|
||||
|
||||
// IntNode holds integer value
|
||||
type IntNode struct {
|
||||
NodeType
|
||||
Value int
|
||||
}
|
||||
|
||||
func newInt(num int) *IntNode {
|
||||
return &IntNode{NodeType: NodeInt, Value: num}
|
||||
}
|
||||
|
||||
func (i *IntNode) String() string {
|
||||
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
|
||||
}
|
||||
|
||||
// FloatNode holds float value
|
||||
type FloatNode struct {
|
||||
NodeType
|
||||
Value float64
|
||||
}
|
||||
|
||||
func newFloat(num float64) *FloatNode {
|
||||
return &FloatNode{NodeType: NodeFloat, Value: num}
|
||||
}
|
||||
|
||||
func (i *FloatNode) String() string {
|
||||
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
|
||||
}
|
||||
|
||||
// WildcardNode means a wildcard
|
||||
type WildcardNode struct {
|
||||
NodeType
|
||||
}
|
||||
|
||||
func newWildcard() *WildcardNode {
|
||||
return &WildcardNode{NodeType: NodeWildcard}
|
||||
}
|
||||
|
||||
func (i *WildcardNode) String() string {
|
||||
return fmt.Sprintf("%s", i.Type())
|
||||
}
|
||||
|
||||
// RecursiveNode means a recursive descent operator
|
||||
type RecursiveNode struct {
|
||||
NodeType
|
||||
}
|
||||
|
||||
func newRecursive() *RecursiveNode {
|
||||
return &RecursiveNode{NodeType: NodeRecursive}
|
||||
}
|
||||
|
||||
func (r *RecursiveNode) String() string {
|
||||
return fmt.Sprintf("%s", r.Type())
|
||||
}
|
||||
|
||||
// UnionNode is union of ListNode
|
||||
type UnionNode struct {
|
||||
NodeType
|
||||
Nodes []*ListNode
|
||||
}
|
||||
|
||||
func newUnion(nodes []*ListNode) *UnionNode {
|
||||
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
|
||||
}
|
||||
|
||||
func (u *UnionNode) String() string {
|
||||
return fmt.Sprintf("%s", u.Type())
|
||||
}
|
@@ -1,433 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const eof = -1
|
||||
|
||||
const (
|
||||
leftDelim = "{"
|
||||
rightDelim = "}"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
Name string
|
||||
Root *ListNode
|
||||
input string
|
||||
cur *ListNode
|
||||
pos int
|
||||
start int
|
||||
width int
|
||||
}
|
||||
|
||||
// Parse parses the given text and returns a node Parser.
// If an error is encountered, parsing stops and a nil
// Parser is returned with the error.
|
||||
func Parse(name, text string) (*Parser, error) {
|
||||
p := NewParser(name)
|
||||
err := p.Parse(text)
|
||||
if err != nil {
|
||||
p = nil
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func NewParser(name string) *Parser {
|
||||
return &Parser{
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// parseAction parses the expression inside the delimiters
|
||||
func parseAction(name, text string) (*Parser, error) {
|
||||
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
|
||||
// when error happens, p will be nil, so we need to return here
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
p.Root = p.Root.Nodes[0].(*ListNode)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *Parser) Parse(text string) error {
|
||||
p.input = text
|
||||
p.Root = newList()
|
||||
p.pos = 0
|
||||
return p.parseText(p.Root)
|
||||
}
|
||||
|
||||
// consumeText returns the text consumed since the last call to consumeText
|
||||
func (p *Parser) consumeText() string {
|
||||
value := p.input[p.start:p.pos]
|
||||
p.start = p.pos
|
||||
return value
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (p *Parser) next() rune {
|
||||
if int(p.pos) >= len(p.input) {
|
||||
p.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
|
||||
p.width = w
|
||||
p.pos += p.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (p *Parser) peek() rune {
|
||||
r := p.next()
|
||||
p.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (p *Parser) backup() {
|
||||
p.pos -= p.width
|
||||
}
|
||||
|
||||
func (p *Parser) parseText(cur *ListNode) error {
|
||||
for {
|
||||
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
|
||||
if p.pos > p.start {
|
||||
cur.append(newText(p.consumeText()))
|
||||
}
|
||||
return p.parseLeftDelim(cur)
|
||||
}
|
||||
if p.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if p.pos > p.start {
|
||||
cur.append(newText(p.consumeText()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseLeftDelim scans the left delimiter, which is known to be present.
|
||||
func (p *Parser) parseLeftDelim(cur *ListNode) error {
|
||||
p.pos += len(leftDelim)
|
||||
p.consumeText()
|
||||
newNode := newList()
|
||||
cur.append(newNode)
|
||||
cur = newNode
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
func (p *Parser) parseInsideAction(cur *ListNode) error {
|
||||
prefixMap := map[string]func(*ListNode) error{
|
||||
rightDelim: p.parseRightDelim,
|
||||
"[?(": p.parseFilter,
|
||||
"..": p.parseRecursive,
|
||||
}
|
||||
for prefix, parseFunc := range prefixMap {
|
||||
if strings.HasPrefix(p.input[p.pos:], prefix) {
|
||||
return parseFunc(cur)
|
||||
}
|
||||
}
|
||||
|
||||
switch r := p.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return fmt.Errorf("unclosed action")
|
||||
case r == ' ':
|
||||
p.consumeText()
|
||||
case r == '@' || r == '$': //the current object, just pass it
|
||||
p.consumeText()
|
||||
case r == '[':
|
||||
return p.parseArray(cur)
|
||||
case r == '"':
|
||||
return p.parseQuote(cur)
|
||||
case r == '.':
|
||||
return p.parseField(cur)
|
||||
case r == '+' || r == '-' || unicode.IsDigit(r):
|
||||
p.backup()
|
||||
return p.parseNumber(cur)
|
||||
case isAlphaNumeric(r):
|
||||
p.backup()
|
||||
return p.parseIdentifier(cur)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseRightDelim scans the right delimiter, which is known to be present.
|
||||
func (p *Parser) parseRightDelim(cur *ListNode) error {
|
||||
p.pos += len(rightDelim)
|
||||
p.consumeText()
|
||||
cur = p.Root
|
||||
return p.parseText(cur)
|
||||
}
|
||||
|
||||
// parseIdentifier scans built-in keywords, like "range" and "end"
|
||||
func (p *Parser) parseIdentifier(cur *ListNode) error {
|
||||
var r rune
|
||||
for {
|
||||
r = p.next()
|
||||
if isTerminator(r) {
|
||||
p.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
cur.append(newIdentifier(value))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseRecursive scans the recursive descent operator ..
|
||||
func (p *Parser) parseRecursive(cur *ListNode) error {
|
||||
p.pos += len("..")
|
||||
p.consumeText()
|
||||
cur.append(newRecursive())
|
||||
if r := p.peek(); isAlphaNumeric(r) {
|
||||
return p.parseField(cur)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseNumber scans number
|
||||
func (p *Parser) parseNumber(cur *ListNode) error {
|
||||
r := p.peek()
|
||||
if r == '+' || r == '-' {
|
||||
r = p.next()
|
||||
}
|
||||
for {
|
||||
r = p.next()
|
||||
if r != '.' && !unicode.IsDigit(r) {
|
||||
p.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
i, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
cur.append(newInt(i))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
d, err := strconv.ParseFloat(value, 64)
|
||||
if err == nil {
|
||||
cur.append(newFloat(d))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
return fmt.Errorf("cannot parse number %s", value)
|
||||
}
|
||||
|
||||
// parseArray scans array index selection
|
||||
func (p *Parser) parseArray(cur *ListNode) error {
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated array")
|
||||
case ']':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
text := p.consumeText()
|
||||
text = string(text[1 : len(text)-1])
|
||||
if text == "*" {
|
||||
text = ":"
|
||||
}
|
||||
|
||||
//union operator
|
||||
strs := strings.Split(text, ",")
|
||||
if len(strs) > 1 {
|
||||
union := []*ListNode{}
|
||||
for _, str := range strs {
|
||||
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
union = append(union, parser.Root)
|
||||
}
|
||||
cur.append(newUnion(union))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// dict key
|
||||
reg := regexp.MustCompile(`^'([^']*)'$`)
|
||||
value := reg.FindStringSubmatch(text)
|
||||
if value != nil {
|
||||
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range parser.Root.Nodes {
|
||||
cur.append(node)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
//slice operator
|
||||
reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`)
|
||||
value = reg.FindStringSubmatch(text)
|
||||
if value == nil {
|
||||
return fmt.Errorf("invalid array index %s", text)
|
||||
}
|
||||
value = value[1:]
|
||||
params := [3]ParamsEntry{}
|
||||
for i := 0; i < 3; i++ {
|
||||
if value[i] != "" {
|
||||
if i > 0 {
|
||||
value[i] = value[i][1:]
|
||||
}
|
||||
if i > 0 && value[i] == "" {
|
||||
params[i].Known = false
|
||||
} else {
|
||||
var err error
|
||||
params[i].Known = true
|
||||
params[i].Value, err = strconv.Atoi(value[i])
|
||||
if err != nil {
|
||||
return fmt.Errorf("array index %s is not a number", value[i])
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if i == 1 {
|
||||
params[i].Known = true
|
||||
params[i].Value = params[0].Value + 1
|
||||
} else {
|
||||
params[i].Known = false
|
||||
params[i].Value = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
cur.append(newArray(params))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseFilter scans filter inside array selection
|
||||
func (p *Parser) parseFilter(cur *ListNode) error {
|
||||
p.pos += len("[?(")
|
||||
p.consumeText()
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated filter")
|
||||
case ')':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
if p.next() != ']' {
|
||||
return fmt.Errorf("unclosed array expect ]")
|
||||
}
|
||||
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
|
||||
text := p.consumeText()
|
||||
text = string(text[:len(text)-2])
|
||||
value := reg.FindStringSubmatch(text)
|
||||
if value == nil {
|
||||
parser, err := parseAction("text", text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cur.append(newFilter(parser.Root, newList(), "exists"))
|
||||
} else {
|
||||
leftParser, err := parseAction("left", value[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rightParser, err := parseAction("right", value[3])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseQuote unquotes a double-quoted string
|
||||
func (p *Parser) parseQuote(cur *ListNode) error {
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
s, err := strconv.Unquote(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unquote string %s error %v", value, err)
|
||||
}
|
||||
cur.append(newText(s))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseField scans a field until a terminator
|
||||
func (p *Parser) parseField(cur *ListNode) error {
|
||||
p.consumeText()
|
||||
for p.advance() {
|
||||
}
|
||||
value := p.consumeText()
|
||||
if value == "*" {
|
||||
cur.append(newWildcard())
|
||||
} else {
|
||||
cur.append(newField(strings.Replace(value, "\\", "", -1)))
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// advance scans until next non-escaped terminator
|
||||
func (p *Parser) advance() bool {
|
||||
r := p.next()
|
||||
if r == '\\' {
|
||||
p.next()
|
||||
} else if isTerminator(r) {
|
||||
p.backup()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isTerminator reports whether the input is at a valid termination character that may appear after an identifier.
|
||||
func isTerminator(r rune) bool {
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type parserTest struct {
|
||||
name string
|
||||
text string
|
||||
nodes []Node
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
var parserTests = []parserTest{
|
||||
{"plain", `hello jsonpath`, []Node{newText("hello jsonpath")}, false},
|
||||
{"variable", `hello {.jsonpath}`,
|
||||
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
|
||||
{"arrayfiled", `hello {['jsonpath']}`,
|
||||
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
|
||||
{"quote", `{"{"}`, []Node{newList(), newText("{")}, false},
|
||||
{"array", `{[1:3]}`, []Node{newList(),
|
||||
newArray([3]ParamsEntry{{1, true}, {3, true}, {0, false}})}, false},
|
||||
{"allarray", `{.book[*].author}`,
|
||||
[]Node{newList(), newField("book"),
|
||||
newArray([3]ParamsEntry{{0, false}, {0, false}, {0, false}}), newField("author")}, false},
|
||||
{"wildcard", `{.bicycle.*}`,
|
||||
[]Node{newList(), newField("bicycle"), newWildcard()}, false},
|
||||
{"filter", `{[?(@.price<3)]}`,
|
||||
[]Node{newList(), newFilter(newList(), newList(), "<"),
|
||||
newList(), newField("price"), newList(), newInt(3)}, false},
|
||||
{"recursive", `{..}`, []Node{newList(), newRecursive()}, false},
|
||||
{"recurField", `{..price}`,
|
||||
[]Node{newList(), newRecursive(), newField("price")}, false},
|
||||
{"arraydict", `{['book.price']}`, []Node{newList(),
|
||||
newField("book"), newField("price"),
|
||||
}, false},
|
||||
{"union", `{['bicycle.price', 3, 'book.price']}`, []Node{newList(), newUnion([]*ListNode{}),
|
||||
newList(), newField("bicycle"), newField("price"),
|
||||
newList(), newArray([3]ParamsEntry{{3, true}, {4, true}, {0, false}}),
|
||||
newList(), newField("book"), newField("price"),
|
||||
}, false},
|
||||
{"range", `{range .items}{.name},{end}`, []Node{
|
||||
newList(), newIdentifier("range"), newField("items"),
|
||||
newList(), newField("name"), newText(","),
|
||||
newList(), newIdentifier("end"),
|
||||
}, false},
|
||||
{"malformat input", `{\\\}`, []Node{}, true},
|
||||
}
|
||||
|
||||
func collectNode(nodes []Node, cur Node) []Node {
	nodes = append(nodes, cur)
	switch cur.Type() {
	case NodeList:
		for _, node := range cur.(*ListNode).Nodes {
			nodes = collectNode(nodes, node)
		}
	case NodeFilter:
		nodes = collectNode(nodes, cur.(*FilterNode).Left)
		nodes = collectNode(nodes, cur.(*FilterNode).Right)
	case NodeUnion:
		for _, node := range cur.(*UnionNode).Nodes {
			nodes = collectNode(nodes, node)
		}
	}
	return nodes
}

func TestParser(t *testing.T) {
	for _, test := range parserTests {
		parser, err := Parse(test.name, test.text)
		if test.shouldError {
			if err == nil {
				t.Errorf("unexpected non-error when parsing %s", test.name)
			}
			continue
		}
		if err != nil {
			t.Errorf("parse %s error %v", test.name, err)
		}
		result := collectNode([]Node{}, parser.Root)[1:]
		if len(result) != len(test.nodes) {
			t.Errorf("in %s, expect to get %d nodes, got %d nodes", test.name, len(test.nodes), len(result))
			t.Error(result)
		}
		for i, expect := range test.nodes {
			if result[i].String() != expect.String() {
				t.Errorf("in %s, %dth node, expect %v, got %v", test.name, i, expect, result[i])
			}
		}
	}
}

type failParserTest struct {
	name string
	text string
	err  string
}

func TestFailParser(t *testing.T) {
	failParserTests := []failParserTest{
		{"unclosed action", "{.hello", "unclosed action"},
		{"unrecognized character", "{*}", "unrecognized character in action: U+002A '*'"},
		{"invalid number", "{+12.3.0}", "cannot parse number +12.3.0"},
		{"unterminated array", "{[1}", "unterminated array"},
		{"invalid index", "{[::-1]}", "invalid array index ::-1"},
		{"unterminated filter", "{[?(.price]}", "unterminated filter"},
	}
	for _, test := range failParserTests {
		_, err := Parse(test.name, test.text)
		var out string
		if err == nil {
			out = "nil"
		} else {
			out = err.Error()
		}
		if out != test.err {
			t.Errorf("in %s, expect to get error %v, got %v", test.name, test.err, out)
		}
	}
}
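These tests exercise the package's internal parser directly; consumers normally go through the exported JSONPath type. A hedged sketch of that consumer-level flow, assuming the package is importable as k8s.io/client-go/util/jsonpath (the exact import path may differ at this commit); the pod map is invented sample data.

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/util/jsonpath" // assumed import path
)

func main() {
	pod := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "nginx", "namespace": "default"},
	}

	jp := jsonpath.New("example")
	// The template syntax is the same one the parser tests above cover:
	// {.field}, {['field']}, {range ...}{end}, filters, unions, and so on.
	if err := jp.Parse(`{.metadata.namespace}/{.metadata.name}`); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := jp.Execute(os.Stdout, pod); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println() // prints "default/nginx"
}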
@@ -1,139 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import (
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"sync"
)
// TestInterface is a simple interface providing Errorf, to make injection for
// testing easier (insert 'yo dawg' meme here).
type TestInterface interface {
	Errorf(format string, args ...interface{})
	Logf(format string, args ...interface{})
}

// LogInterface is a simple interface to allow injection of Logf to report serving errors.
type LogInterface interface {
	Logf(format string, args ...interface{})
}

// FakeHandler is to assist in testing HTTP requests. Notice that FakeHandler is
// not thread safe and you must not direct traffic to it except for the request
// you want to test. You can do this by hiding it in an http.ServeMux.
type FakeHandler struct {
	RequestReceived *http.Request
	RequestBody     string
	StatusCode      int
	ResponseBody    string
	// For logging - you can use a *testing.T
	// This will keep log messages associated with the test.
	T LogInterface

	// Enforce "only one use" constraint.
	lock           sync.Mutex
	requestCount   int
	hasBeenChecked bool

	SkipRequestFn func(verb string, url url.URL) bool
}

func (f *FakeHandler) SetResponseBody(responseBody string) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.ResponseBody = responseBody
}

func (f *FakeHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.SkipRequestFn != nil && f.SkipRequestFn(request.Method, *request.URL) {
		response.Header().Set("Content-Type", "application/json")
		response.WriteHeader(f.StatusCode)
		response.Write([]byte(f.ResponseBody))
		return
	}

	f.requestCount++
	if f.hasBeenChecked {
		panic("got request after having been validated")
	}

	f.RequestReceived = request
	response.Header().Set("Content-Type", "application/json")
	response.WriteHeader(f.StatusCode)
	response.Write([]byte(f.ResponseBody))

	bodyReceived, err := ioutil.ReadAll(request.Body)
	if err != nil && f.T != nil {
		f.T.Logf("Received read error: %v", err)
	}
	f.RequestBody = string(bodyReceived)
	if f.T != nil {
		f.T.Logf("request body: %s", f.RequestBody)
	}
}

func (f *FakeHandler) ValidateRequestCount(t TestInterface, count int) bool {
	ok := true
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.requestCount != count {
		ok = false
		t.Errorf("Expected %d call, but got %d. Only the last call is recorded and checked.", count, f.requestCount)
	}
	f.hasBeenChecked = true
	return ok
}

// ValidateRequest verifies that FakeHandler received a request with expected path, method, and body.
func (f *FakeHandler) ValidateRequest(t TestInterface, expectedPath, expectedMethod string, body *string) {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.requestCount != 1 {
		t.Logf("Expected 1 call, but got %v. Only the last call is recorded and checked.", f.requestCount)
	}
	f.hasBeenChecked = true

	expectURL, err := url.Parse(expectedPath)
	if err != nil {
		t.Errorf("Couldn't parse %v as a URL.", expectedPath)
	}
	if f.RequestReceived == nil {
		t.Errorf("Unexpected nil request received for %s", expectedPath)
		return
	}
	if f.RequestReceived.URL.Path != expectURL.Path {
		t.Errorf("Unexpected request path for request %#v, received: %q, expected: %q", f.RequestReceived, f.RequestReceived.URL.Path, expectURL.Path)
	}
	if e, a := expectURL.Query(), f.RequestReceived.URL.Query(); !reflect.DeepEqual(e, a) {
		t.Errorf("Unexpected query for request %#v, received: %q, expected: %q", f.RequestReceived, a, e)
	}
	if f.RequestReceived.Method != expectedMethod {
		t.Errorf("Unexpected method: %q, expected: %q", f.RequestReceived.Method, expectedMethod)
	}
	if body != nil {
		if *body != f.RequestBody {
			t.Errorf("Received body:\n%s\n Doesn't match expected body:\n%s", f.RequestBody, *body)
		}
	}
}
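The SkipRequestFn hook above is not exercised by the tests that follow. A hedged sketch of how an external caller might use it to keep unrelated traffic (for example, discovery calls a client issues before the request under test) from tripping the single-request bookkeeping; the import path, test name, and URL paths are illustrative assumptions, not part of the original files.

package testing_test

import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	utiltesting "k8s.io/client-go/util/testing" // assumed import path for this package
)

func TestIgnoresDiscovery(t *testing.T) {
	handler := utiltesting.FakeHandler{
		StatusCode:   http.StatusOK,
		ResponseBody: `{}`,
		T:            t, // *testing.T satisfies LogInterface
		// Requests matched here are answered but not recorded, so they do not
		// count against the "only one request" expectation.
		SkipRequestFn: func(verb string, url url.URL) bool {
			return verb == "GET" && url.Path == "/api" // illustrative path
		},
	}
	server := httptest.NewServer(&handler)
	defer server.Close()

	http.Get(server.URL + "/api")        // skipped by SkipRequestFn
	http.Get(server.URL + "/namespaces") // recorded

	handler.ValidateRequest(t, "/namespaces", "GET", nil)
}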
@@ -1,180 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestFakeHandlerPath(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"
	body := "somebody"

	req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	handler.ValidateRequest(t, path, method, &body)
}

func TestFakeHandlerPathNoBody(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"

	req, err := http.NewRequest(method, server.URL+path, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	handler.ValidateRequest(t, path, method, nil)
}

type fakeError struct {
	errors []string
}

func (f *fakeError) Errorf(format string, args ...interface{}) {
	f.errors = append(f.errors, format)
}

func (f *fakeError) Logf(format string, args ...interface{}) {}

func TestFakeHandlerWrongPath(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"
	fakeT := fakeError{}

	req, err := http.NewRequest(method, server.URL+"/foo/baz", nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	handler.ValidateRequest(&fakeT, path, method, nil)
	if len(fakeT.errors) != 1 {
		t.Errorf("Unexpected error set: %#v", fakeT.errors)
	}
}

func TestFakeHandlerWrongMethod(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"
	fakeT := fakeError{}

	req, err := http.NewRequest("PUT", server.URL+path, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	handler.ValidateRequest(&fakeT, path, method, nil)
	if len(fakeT.errors) != 1 {
		t.Errorf("Unexpected error set: %#v", fakeT.errors)
	}
}

func TestFakeHandlerWrongBody(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"
	body := "somebody"
	fakeT := fakeError{}

	req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	otherbody := "otherbody"
	handler.ValidateRequest(&fakeT, path, method, &otherbody)
	if len(fakeT.errors) != 1 {
		t.Errorf("Unexpected error set: %#v", fakeT.errors)
	}
}

func TestFakeHandlerNilBody(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()
	method := "GET"
	path := "/foo/bar"
	body := "somebody"
	fakeT := fakeError{}

	req, err := http.NewRequest(method, server.URL+path, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	client := http.Client{}
	_, err = client.Do(req)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	handler.ValidateRequest(&fakeT, path, method, &body)
	if len(fakeT.errors) != 1 {
		t.Errorf("Unexpected error set: %#v", fakeT.errors)
	}
}
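The tests above do not cover ValidateRequestCount. A hedged sketch of an additional test one might write in this same file (it relies on the net/http, net/http/httptest, and testing imports already present above; the test name is invented):

func TestFakeHandlerRequestCount(t *testing.T) {
	handler := FakeHandler{StatusCode: http.StatusOK}
	server := httptest.NewServer(&handler)
	defer server.Close()

	client := http.Client{}
	for i := 0; i < 3; i++ {
		req, err := http.NewRequest("GET", server.URL+"/foo/bar", nil)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if _, err := client.Do(req); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
	}

	// ValidateRequestCount only compares the total; the handler keeps just the
	// last request for inspection.
	if !handler.ValidateRequestCount(t, 3) {
		t.Errorf("expected exactly 3 requests to be recorded")
	}
}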
@@ -1,44 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import (
	"io/ioutil"
	"os"
)

// MkTmpdir creates a temporary directory based upon the prefix passed in.
// If successful, it returns the temporary directory path. The directory can be
// deleted with a call to "os.RemoveAll(...)".
// In case of error, it'll return an empty string and the error.
func MkTmpdir(prefix string) (string, error) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), prefix)
	if err != nil {
		return "", err
	}
	return tmpDir, nil
}

// MkTmpdirOrDie does the same work as "MkTmpdir", except in case of
// errors, it'll trigger a panic.
func MkTmpdirOrDie(prefix string) string {
	tmpDir, err := MkTmpdir(prefix)
	if err != nil {
		panic(err)
	}
	return tmpDir
}
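A hedged usage sketch for MkTmpdir, assuming the package is imported as k8s.io/client-go/util/testing (import path and prefix are illustrative):

package main

import (
	"fmt"
	"os"

	utiltesting "k8s.io/client-go/util/testing" // assumed import path
)

func main() {
	dir, err := utiltesting.MkTmpdir("kube-test-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// As the doc comment notes, the caller removes the directory when done.
	defer os.RemoveAll(dir)

	fmt.Println("temporary workspace:", dir)
}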
@@ -21,7 +21,7 @@ import (
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/pkg/util/clock"
	"k8s.io/client-go/util/clock"
)

// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
@@ -23,7 +23,7 @@ import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/pkg/util/clock"
	"k8s.io/client-go/util/clock"
)

func TestSimpleQueue(t *testing.T) {
@@ -20,7 +20,7 @@ import (
	"testing"
	"time"

	"k8s.io/client-go/pkg/util/clock"
	"k8s.io/client-go/util/clock"
)

func TestRateLimitingQueue(t *testing.T) {
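The three hunks above only move the clock dependency from k8s.io/client-go/pkg/util/clock to k8s.io/client-go/util/clock; consumers simply update the import path. A minimal sketch of code after the change (RealClock is assumed to be the package's wall-clock implementation of its Clock interface, as in the util/clock package these files were copied from):

package main

import (
	"fmt"

	"k8s.io/client-go/util/clock" // new location; previously k8s.io/client-go/pkg/util/clock
)

func main() {
	// Use the interface so a fake clock could be substituted in tests.
	var c clock.Clock = clock.RealClock{}
	fmt.Println("now:", c.Now())
}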