mirror of https://github.com/rancher/rke.git synced 2025-07-06 12:06:15 +00:00

Merge pull request #282 from moelsayed/images_type

Add SystemImages type
Merged by Darren Shepherd on 2018-01-30 16:58:55 +00:00, committed by GitHub
commit 1ba2f3d8df
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
84 changed files with 14513 additions and 16628 deletions
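This PR replaces RKE's loose map[string]string of system images with a typed SystemImages struct consumed from the vendored rancher/types repo (see the vendor.conf bump below). The struct definition itself is not part of this diff; the following is only a sketch of its shape, inferred from the fields the diffs below access — the type name RKESystemImages and the yaml/json tags are assumptions, not copied from rancher/types.

// Sketch only: reconstructed from the field accesses in this PR.
type RKESystemImages struct {
	Alpine                    string `yaml:"alpine" json:"alpine,omitempty"`                           // e.g. alpine:latest; port checks, cert recovery, host cleanup
	NginxProxy                string `yaml:"nginx_proxy" json:"nginxProxy,omitempty"`                  // rancher/rke-nginx-proxy
	CertDownloader            string `yaml:"cert_downloader" json:"certDownloader,omitempty"`          // rancher/rke-cert-deployer
	KubernetesServicesSidecar string `yaml:"kubernetes_services_sidecar" json:"kubernetesServicesSidecar,omitempty"` // rke-service-sidekick
	KubeDNS                   string `yaml:"kubedns" json:"kubedns,omitempty"`
	DNSmasq                   string `yaml:"dnsmasq" json:"dnsmasq,omitempty"`
	KubeDNSSidecar            string `yaml:"kubedns_sidecar" json:"kubednsSidecar,omitempty"`
	KubeDNSAutoscaler         string `yaml:"kubedns_autoscaler" json:"kubednsAutoscaler,omitempty"`
}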

View File

@@ -74,11 +74,11 @@ system_images:
   alpine: alpine:latest
   nginx_proxy: rancher/rke-nginx-proxy:v0.1.1
   cert_downloader: rancher/rke-cert-deployer:v0.1.1
-  service_sidekick_image: rancher/rke-service-sidekick:v0.1.0
-  kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
-  dnsmasq_image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
-  kubedns_sidecar_image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
-  kubedns_autoscaler_image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
+  kubernetes_services_sidecar: rancher/rke-service-sidekick:v0.1.0
+  kubedns: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
+  dnsmasq: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
+  kubedns_sidecar: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
+  kubedns_autoscaler: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
   # all addon manifests MUST specify a namespace
   addons: |-

View File

@@ -40,10 +40,10 @@ func (c *Cluster) deployKubeDNS(ctx context.Context) error {
 	kubeDNSConfig := map[string]string{
 		addons.KubeDNSServer:          c.ClusterDNSServer,
 		addons.KubeDNSClusterDomain:   c.ClusterDomain,
-		addons.KubeDNSImage:           c.SystemImages[KubeDNSImage],
-		addons.DNSMasqImage:           c.SystemImages[DNSMasqImage],
-		addons.KubeDNSSidecarImage:    c.SystemImages[KubeDNSSidecarImage],
-		addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage],
+		addons.KubeDNSImage:           c.SystemImages.KubeDNS,
+		addons.DNSMasqImage:           c.SystemImages.DNSmasq,
+		addons.KubeDNSSidecarImage:    c.SystemImages.KubeDNSSidecar,
+		addons.KubeDNSAutoScalerImage: c.SystemImages.KubeDNSAutoscaler,
 	}
 	kubeDNSYaml, err := addons.GetKubeDNSManifest(kubeDNSConfig)
 	if err != nil {

View File

@@ -22,7 +22,7 @@ func SetUpAuthentication(ctx context.Context, kubeCluster, currentCluster *Clust
 		kubeCluster.Certificates = currentCluster.Certificates
 	} else {
 		log.Infof(ctx, "[certificates] Attempting to recover certificates from backup on host [%s]", kubeCluster.EtcdHosts[0].Address)
-		kubeCluster.Certificates, err = pki.FetchCertificatesFromHost(ctx, kubeCluster.EtcdHosts, kubeCluster.EtcdHosts[0], kubeCluster.SystemImages[AplineImage], kubeCluster.LocalKubeConfigPath)
+		kubeCluster.Certificates, err = pki.FetchCertificatesFromHost(ctx, kubeCluster.EtcdHosts, kubeCluster.EtcdHosts[0], kubeCluster.SystemImages.Alpine, kubeCluster.LocalKubeConfigPath)
 		if err != nil {
 			return err
 		}
@@ -42,7 +42,7 @@ func SetUpAuthentication(ctx context.Context, kubeCluster, currentCluster *Clust
 			return fmt.Errorf("Failed to generate Kubernetes certificates: %v", err)
 		}
 		log.Infof(ctx, "[certificates] Temporarily saving certs to etcd host [%s]", kubeCluster.EtcdHosts[0].Address)
-		if err := pki.DeployCertificatesOnHost(ctx, kubeCluster.EtcdHosts, kubeCluster.EtcdHosts[0], kubeCluster.Certificates, kubeCluster.SystemImages[CertDownloaderImage], pki.TempCertPath); err != nil {
+		if err := pki.DeployCertificatesOnHost(ctx, kubeCluster.EtcdHosts, kubeCluster.EtcdHosts[0], kubeCluster.Certificates, kubeCluster.SystemImages.CertDownloader, pki.TempCertPath); err != nil {
 			return err
 		}
 		log.Infof(ctx, "[certificates] Saved certs to etcd host [%s]", kubeCluster.EtcdHosts[0].Address)

View File

@@ -43,14 +43,6 @@ const (
 	UpdateStateTimeout      = 30
 	GetStateTimeout         = 30
 	KubernetesClientTimeOut = 30
-	AplineImage             = "alpine"
-	NginxProxyImage         = "nginx_proxy"
-	CertDownloaderImage     = "cert_downloader"
-	KubeDNSImage            = "kubedns_image"
-	DNSMasqImage            = "dnsmasq_image"
-	KubeDNSSidecarImage     = "kubedns_sidecar_image"
-	KubeDNSAutoScalerImage  = "kubedns_autoscaler_image"
-	ServiceSidekickImage    = "service_sidekick_image"
 	NoneAuthorizationMode   = "none"
 	LocalNodeAddress        = "127.0.0.1"
 	LocalNodeHostname       = "localhost"
@@ -66,7 +58,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
 	if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
 		c.EtcdHosts,
 		c.Services,
-		c.SystemImages[ServiceSidekickImage],
+		c.SystemImages.KubernetesServicesSidecar,
 		c.Authorization.Mode,
 		c.LocalConnDialerFactory); err != nil {
 		return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
@@ -84,8 +76,8 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context) error {
 		c.WorkerHosts,
 		c.EtcdHosts,
 		c.Services,
-		c.SystemImages[NginxProxyImage],
-		c.SystemImages[ServiceSidekickImage],
+		c.SystemImages.NginxProxy,
+		c.SystemImages.KubernetesServicesSidecar,
 		c.LocalConnDialerFactory); err != nil {
 		return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
 	}
@@ -189,22 +181,19 @@ func (c *Cluster) setClusterServicesDefaults() {
 }
 func (c *Cluster) setClusterImageDefaults() {
-	if c.SystemImages == nil {
-		// don't break if the user didn't define rke_images
-		c.SystemImages = make(map[string]string)
-	}
-	systemImagesDefaultsMap := map[string]string{
-		AplineImage:            DefaultAplineImage,
-		NginxProxyImage:        DefaultNginxProxyImage,
-		CertDownloaderImage:    DefaultCertDownloaderImage,
-		KubeDNSImage:           DefaultKubeDNSImage,
-		DNSMasqImage:           DefaultDNSMasqImage,
-		KubeDNSSidecarImage:    DefaultKubeDNSSidecarImage,
-		KubeDNSAutoScalerImage: DefaultKubeDNSAutoScalerImage,
-		ServiceSidekickImage:   DefaultServiceSidekickImage,
+	systemImagesDefaultsMap := map[*string]string{
+		&c.SystemImages.Alpine:                    DefaultAplineImage,
+		&c.SystemImages.NginxProxy:                DefaultNginxProxyImage,
+		&c.SystemImages.CertDownloader:            DefaultCertDownloaderImage,
+		&c.SystemImages.KubeDNS:                   DefaultKubeDNSImage,
+		&c.SystemImages.KubeDNSSidecar:            DefaultKubeDNSSidecarImage,
+		&c.SystemImages.DNSmasq:                   DefaultDNSmasqImage,
+		&c.SystemImages.KubeDNSAutoscaler:         DefaultKubeDNSAutoScalerImage,
+		&c.SystemImages.KubernetesServicesSidecar: DefaultKubernetesServicesSidecarImage,
 	}
 	for k, v := range systemImagesDefaultsMap {
-		setDefaultIfEmptyMapValue(c.SystemImages, k, v)
+		setDefaultIfEmpty(k, v)
 	}
 }
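The rewritten setClusterImageDefaults above keys the defaults map by pointers to the struct fields and delegates to setDefaultIfEmpty, a helper that is not shown in this diff. A minimal sketch of what such a helper could look like follows; the name is taken from the call site, the body is assumed.

// Assumed helper: only overwrite the field when the user left it empty in cluster.yml.
func setDefaultIfEmpty(field *string, defaultValue string) {
	if len(*field) == 0 {
		*field = defaultValue
	}
}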

View File

@@ -17,11 +17,11 @@ const (
 	DefaultNetworkPlugin        = "flannel"
 	DefaultNetworkCloudProvider = "none"
 	DefaultInfraContainerImage  = "gcr.io/google_containers/pause-amd64:3.0"
 	DefaultAplineImage          = "alpine:latest"
 	DefaultNginxProxyImage      = "rancher/rke-nginx-proxy:v0.1.1"
 	DefaultCertDownloaderImage  = "rancher/rke-cert-deployer:v0.1.1"
-	DefaultServiceSidekickImage = "rancher/rke-service-sidekick:v0.1.0"
+	DefaultKubernetesServicesSidecarImage = "rancher/rke-service-sidekick:v0.1.0"
 	DefaultEtcdImage = "quay.io/coreos/etcd:latest"
 	DefaultK8sImage  = "rancher/k8s:v1.8.5-rancher4"
@@ -42,7 +42,7 @@ const (
 	DefaultCanalFlannelImage      = "quay.io/coreos/flannel:v0.9.1"
 	DefaultKubeDNSImage           = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5"
-	DefaultDNSMasqImage           = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5"
+	DefaultDNSmasqImage           = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5"
 	DefaultKubeDNSSidecarImage    = "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5"
 	DefaultKubeDNSAutoScalerImage = "gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0"
 )

View File

@@ -72,14 +72,14 @@ func (c *Cluster) InvertIndexHosts() error {
 func (c *Cluster) SetUpHosts(ctx context.Context) error {
 	if c.Authentication.Strategy == X509AuthenticationProvider {
 		log.Infof(ctx, "[certificates] Deploying kubernetes certificates to Cluster nodes")
-		if err := pki.DeployCertificatesOnMasters(ctx, c.ControlPlaneHosts, c.Certificates, c.SystemImages[CertDownloaderImage]); err != nil {
+		if err := pki.DeployCertificatesOnMasters(ctx, c.ControlPlaneHosts, c.Certificates, c.SystemImages.CertDownloader); err != nil {
 			return err
 		}
-		if err := pki.DeployCertificatesOnWorkers(ctx, c.WorkerHosts, c.Certificates, c.SystemImages[CertDownloaderImage]); err != nil {
+		if err := pki.DeployCertificatesOnWorkers(ctx, c.WorkerHosts, c.Certificates, c.SystemImages.CertDownloader); err != nil {
 			return err
 		}
 		// Deploying etcd certificates
-		if err := pki.DeployCertificatesOnEtcd(ctx, c.EtcdHosts, c.Certificates, c.SystemImages[CertDownloaderImage]); err != nil {
+		if err := pki.DeployCertificatesOnEtcd(ctx, c.EtcdHosts, c.Certificates, c.SystemImages.CertDownloader); err != nil {
 			return err
 		}

View File

@@ -343,7 +343,7 @@ func (c *Cluster) deployListenerOnPlane(ctx context.Context, portList []string,
 }
 func (c *Cluster) deployListener(ctx context.Context, host *hosts.Host, portList []string, containerName string) error {
 	imageCfg := &container.Config{
-		Image: c.SystemImages[AplineImage],
+		Image: c.SystemImages.Alpine,
 		Cmd: []string{
 			"nc",
 			"-kl",
@@ -412,7 +412,7 @@ func (c *Cluster) runServicePortChecks(ctx context.Context) error {
 	for _, host := range c.EtcdHosts {
 		runHost := host
 		errgrp.Go(func() error {
-			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages[AplineImage])
+			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages.Alpine)
 		})
 	}
 	if err := errgrp.Wait(); err != nil {
@@ -424,7 +424,7 @@ func (c *Cluster) runServicePortChecks(ctx context.Context) error {
 	for _, host := range c.ControlPlaneHosts {
 		runHost := host
 		errgrp.Go(func() error {
-			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages[AplineImage])
+			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages.Alpine)
 		})
 	}
 	if err := errgrp.Wait(); err != nil {
@@ -434,7 +434,7 @@ func (c *Cluster) runServicePortChecks(ctx context.Context) error {
 	for _, host := range c.WorkerHosts {
 		runHost := host
 		errgrp.Go(func() error {
-			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages[AplineImage])
+			return checkPlaneTCPPortsFromHost(ctx, runHost, etcdPortList, c.EtcdHosts, c.SystemImages.Alpine)
 		})
 	}
 	if err := errgrp.Wait(); err != nil {
@@ -448,7 +448,7 @@ func (c *Cluster) runServicePortChecks(ctx context.Context) error {
 	for _, host := range c.ControlPlaneHosts {
 		runHost := host
 		errgrp.Go(func() error {
-			return checkPlaneTCPPortsFromHost(ctx, runHost, workerPortList, c.WorkerHosts, c.SystemImages[AplineImage])
+			return checkPlaneTCPPortsFromHost(ctx, runHost, workerPortList, c.WorkerHosts, c.SystemImages.Alpine)
 		})
 	}
 	if err := errgrp.Wait(); err != nil {
@@ -462,7 +462,7 @@ func (c *Cluster) runServicePortChecks(ctx context.Context) error {
 	for _, host := range c.WorkerHosts {
 		runHost := host
 		errgrp.Go(func() error {
-			return checkPlaneTCPPortsFromHost(ctx, runHost, controlPlanePortList, c.ControlPlaneHosts, c.SystemImages[AplineImage])
+			return checkPlaneTCPPortsFromHost(ctx, runHost, controlPlanePortList, c.ControlPlaneHosts, c.SystemImages.Alpine)
 		})
 	}
 	return errgrp.Wait()

View File

@@ -56,7 +56,7 @@ func reconcileWorker(ctx context.Context, currentCluster, kubeCluster *Cluster,
 			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
-		if err := reconcileHost(ctx, toDeleteHost, true, false, currentCluster.SystemImages[AplineImage], currentCluster.DockerDialerFactory); err != nil {
+		if err := reconcileHost(ctx, toDeleteHost, true, false, currentCluster.SystemImages.Alpine, currentCluster.DockerDialerFactory); err != nil {
 			log.Warnf(ctx, "[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
 			continue
 		}
@@ -97,7 +97,7 @@ func reconcileControl(ctx context.Context, currentCluster, kubeCluster *Cluster,
 			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
-		if err := reconcileHost(ctx, toDeleteHost, false, false, currentCluster.SystemImages[AplineImage], currentCluster.DockerDialerFactory); err != nil {
+		if err := reconcileHost(ctx, toDeleteHost, false, false, currentCluster.SystemImages.Alpine, currentCluster.DockerDialerFactory); err != nil {
 			log.Warnf(ctx, "[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
 			continue
 		}
@@ -110,7 +110,7 @@ func reconcileControl(ctx context.Context, currentCluster, kubeCluster *Cluster,
 	cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
 	if cpChanged {
 		log.Infof(ctx, "[reconcile] Rolling update nginx hosts with new list of control plane hosts")
-		err := services.RollingUpdateNginxProxy(ctx, kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts, currentCluster.SystemImages[NginxProxyImage])
+		err := services.RollingUpdateNginxProxy(ctx, kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts, currentCluster.SystemImages.NginxProxy)
 		if err != nil {
 			return fmt.Errorf("Failed to rolling update Nginx hosts with new control plane hosts")
 		}
@@ -173,7 +173,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 			continue
 		}
 		// attempting to clean services/files on the host
-		if err := reconcileHost(ctx, etcdHost, false, true, currentCluster.SystemImages[AplineImage], currentCluster.DockerDialerFactory); err != nil {
+		if err := reconcileHost(ctx, etcdHost, false, true, currentCluster.SystemImages.Alpine, currentCluster.DockerDialerFactory); err != nil {
 			log.Warnf(ctx, "[reconcile] Couldn't clean up etcd node [%s]: %v", etcdHost.Address, err)
 			continue
 		}
@@ -199,7 +199,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 	currentCluster.Certificates = crtMap
 	for _, etcdHost := range etcdToAdd {
 		// deploy certificates on new etcd host
-		if err := pki.DeployCertificatesOnHost(ctx, kubeCluster.EtcdHosts, etcdHost, currentCluster.Certificates, kubeCluster.SystemImages[CertDownloaderImage], pki.CertPathPrefix); err != nil {
+		if err := pki.DeployCertificatesOnHost(ctx, kubeCluster.EtcdHosts, etcdHost, currentCluster.Certificates, kubeCluster.SystemImages.CertDownloader, pki.CertPathPrefix); err != nil {
 			return err
 		}

View File

@@ -25,7 +25,7 @@ func (c *Cluster) ClusterRemove(ctx context.Context) error {
 	}
 	// Clean up all hosts
-	if err := cleanUpHosts(ctx, c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages[AplineImage]); err != nil {
+	if err := cleanUpHosts(ctx, c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages.Alpine); err != nil {
 		return err
 	}

View File

@@ -24,4 +24,4 @@ github.com/coreos/go-semver e214231b295a8ea9479f11b70b35d5acf3556d9
 github.com/ugorji/go/codec ccfe18359b55b97855cee1d3f74e5efbda4869dc
 github.com/rancher/norman da40fa7b068e21ca85bfb3c9f4cf207f1008bf43
-github.com/rancher/types 89ffa83b41713b743dbb420b7bd7180ad6ccb5f9
+github.com/rancher/types 019aa7fc1e5f54e09ecdbb2133bc665aa80cf54a

View File

@@ -1,16 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tlsutil provides utility functions for handling TLS.
package tlsutil

View File

@@ -1,72 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
)
// NewCertPool creates x509 certPool with provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
pemByte, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
for {
var block *pem.Block
block, pemByte = pem.Decode(pemByte)
if block == nil {
break
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certPool.AddCert(cert)
}
}
return certPool, nil
}
// NewCert generates TLS cert by using the given cert,key and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
if parseFunc == nil {
parseFunc = tls.X509KeyPair
}
tlsCert, err := parseFunc(cert, key)
if err != nil {
return nil, err
}
return &tlsCert, nil
}

View File

@@ -1,17 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport implements various HTTP transport utilities based on Go
// net package.
package transport

View File

@@ -1,94 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"crypto/tls"
"fmt"
"net"
"time"
)
type keepAliveConn interface {
SetKeepAlive(bool) error
SetKeepAlivePeriod(d time.Duration) error
}
// NewKeepAliveListener returns a listener that listens on the given address.
// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil.
// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
if scheme == "https" {
if tlscfg == nil {
return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
}
return newTLSKeepaliveListener(l, tlscfg), nil
}
return &keepaliveListener{
Listener: l,
}, nil
}
type keepaliveListener struct{ net.Listener }
func (kln *keepaliveListener) Accept() (net.Conn, error) {
c, err := kln.Listener.Accept()
if err != nil {
return nil, err
}
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
return c, nil
}
// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
type tlsKeepaliveListener struct {
net.Listener
config *tls.Config
}
// Accept waits for and returns the next incoming TLS connection.
// The returned connection c is a *tls.Conn.
func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
c, err = l.Listener.Accept()
if err != nil {
return
}
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
c = tls.Server(c, l.config)
return c, nil
}
// NewListener creates a Listener which accepts connections from an inner
// Listener and wraps each connection with Server.
// The configuration config must be non-nil and must have
// at least one certificate.
func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
l := &tlsKeepaliveListener{}
l.Listener = inner
l.config = config
return l
}

View File

@@ -1,80 +0,0 @@
// Copyright 2013 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport provides network utility functions, complementing the more
// common ones in the net package.
package transport
import (
"errors"
"net"
"sync"
"time"
)
var (
ErrNotTCP = errors.New("only tcp connections have keepalive")
)
// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener.
func LimitListener(l net.Listener, n int) net.Listener {
return &limitListener{l, make(chan struct{}, n)}
}
type limitListener struct {
net.Listener
sem chan struct{}
}
func (l *limitListener) acquire() { l.sem <- struct{}{} }
func (l *limitListener) release() { <-l.sem }
func (l *limitListener) Accept() (net.Conn, error) {
l.acquire()
c, err := l.Listener.Accept()
if err != nil {
l.release()
return nil, err
}
return &limitListenerConn{Conn: c, release: l.release}, nil
}
type limitListenerConn struct {
net.Conn
releaseOnce sync.Once
release func()
}
func (l *limitListenerConn) Close() error {
err := l.Conn.Close()
l.releaseOnce.Do(l.release)
return err
}
func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlive(doKeepAlive)
}
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlivePeriod(d)
}

View File

@@ -1,281 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/coreos/etcd/pkg/tlsutil"
)
func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
if l, err = newListener(addr, scheme); err != nil {
return nil, err
}
return wrapTLS(addr, scheme, tlsinfo, l)
}
func newListener(addr string, scheme string) (net.Listener, error) {
if scheme == "unix" || scheme == "unixs" {
// unix sockets via unix://laddr
return NewUnixListener(addr)
}
return net.Listen("tcp", addr)
}
func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
if scheme != "https" && scheme != "unixs" {
return l, nil
}
return newTLSListener(l, tlsinfo, checkSAN)
}
type TLSInfo struct {
CertFile string
KeyFile string
CAFile string // TODO: deprecate this in v4
TrustedCAFile string
ClientCertAuth bool
CRLFile string
InsecureSkipVerify bool
// ServerName ensures the cert matches the given host in case of discovery / virtual hosting
ServerName string
// HandshakeFailure is optionally called when a connection fails to handshake. The
// connection will be closed immediately afterwards.
HandshakeFailure func(*tls.Conn, error)
selfCert bool
// parseFunc exists to simplify testing. Typically, parseFunc
// should be left nil. In that case, tls.X509KeyPair will be used.
parseFunc func([]byte, []byte) (tls.Certificate, error)
// AllowedCN is a CN which must be provided by a client.
AllowedCN string
}
func (info TLSInfo) String() string {
return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
}
func (info TLSInfo) Empty() bool {
return info.CertFile == "" && info.KeyFile == ""
}
func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) {
if err = os.MkdirAll(dirpath, 0700); err != nil {
return
}
certPath := filepath.Join(dirpath, "cert.pem")
keyPath := filepath.Join(dirpath, "key.pem")
_, errcert := os.Stat(certPath)
_, errkey := os.Stat(keyPath)
if errcert == nil && errkey == nil {
info.CertFile = certPath
info.KeyFile = keyPath
info.selfCert = true
return
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return
}
tmpl := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{Organization: []string{"etcd"}},
NotBefore: time.Now(),
NotAfter: time.Now().Add(365 * (24 * time.Hour)),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
for _, host := range hosts {
h, _, _ := net.SplitHostPort(host)
if ip := net.ParseIP(h); ip != nil {
tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
} else {
tmpl.DNSNames = append(tmpl.DNSNames, h)
}
}
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return
}
derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
if err != nil {
return
}
certOut, err := os.Create(certPath)
if err != nil {
return
}
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
b, err := x509.MarshalECPrivateKey(priv)
if err != nil {
return
}
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return
}
pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
keyOut.Close()
return SelfCert(dirpath, hosts)
}
func (info TLSInfo) baseConfig() (*tls.Config, error) {
if info.KeyFile == "" || info.CertFile == "" {
return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
}
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
if err != nil {
return nil, err
}
cfg := &tls.Config{
Certificates: []tls.Certificate{*tlsCert},
MinVersion: tls.VersionTLS12,
ServerName: info.ServerName,
}
if info.AllowedCN != "" {
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
for _, chains := range verifiedChains {
if len(chains) != 0 {
if info.AllowedCN == chains[0].Subject.CommonName {
return nil
}
}
}
return errors.New("CommonName authentication failed")
}
}
// this only reloads certs when there's a client request
// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
}
cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
}
return cfg, nil
}
// cafiles returns a list of CA file paths.
func (info TLSInfo) cafiles() []string {
cs := make([]string, 0)
if info.CAFile != "" {
cs = append(cs, info.CAFile)
}
if info.TrustedCAFile != "" {
cs = append(cs, info.TrustedCAFile)
}
return cs
}
// ServerConfig generates a tls.Config object for use by an HTTP server.
func (info TLSInfo) ServerConfig() (*tls.Config, error) {
cfg, err := info.baseConfig()
if err != nil {
return nil, err
}
cfg.ClientAuth = tls.NoClientCert
if info.CAFile != "" || info.ClientCertAuth {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
}
CAFiles := info.cafiles()
if len(CAFiles) > 0 {
cp, err := tlsutil.NewCertPool(CAFiles)
if err != nil {
return nil, err
}
cfg.ClientCAs = cp
}
// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
cfg.NextProtos = []string{"h2"}
return cfg, nil
}
// ClientConfig generates a tls.Config object for use by an HTTP client.
func (info TLSInfo) ClientConfig() (*tls.Config, error) {
var cfg *tls.Config
var err error
if !info.Empty() {
cfg, err = info.baseConfig()
if err != nil {
return nil, err
}
} else {
cfg = &tls.Config{ServerName: info.ServerName}
}
cfg.InsecureSkipVerify = info.InsecureSkipVerify
CAFiles := info.cafiles()
if len(CAFiles) > 0 {
cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles)
if err != nil {
return nil, err
}
}
if info.selfCert {
cfg.InsecureSkipVerify = true
}
return cfg, nil
}
// IsClosedConnError returns true if the error is from closing listener, cmux.
// copied from golang.org/x/net/http2/http2.go
func IsClosedConnError(err error) bool {
// 'use of closed network connection' (Go <=1.8)
// 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
// 'mux: listener closed' (cmux.ErrListenerClosed)
return err != nil && strings.Contains(err.Error(), "closed")
}

View File

@@ -1,272 +0,0 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"strings"
"sync"
)
// tlsListener overrides a TLS listener so it will reject client
// certificates with insufficient SAN credentials or CRL revoked
// certificates.
type tlsListener struct {
net.Listener
connc chan net.Conn
donec chan struct{}
err error
handshakeFailure func(*tls.Conn, error)
check tlsCheckFunc
}
type tlsCheckFunc func(context.Context, *tls.Conn) error
// NewTLSListener handshakes TLS connections and performs optional CRL checking.
func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
check := func(context.Context, *tls.Conn) error { return nil }
return newTLSListener(l, tlsinfo, check)
}
func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) {
if tlsinfo == nil || tlsinfo.Empty() {
l.Close()
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String())
}
tlscfg, err := tlsinfo.ServerConfig()
if err != nil {
return nil, err
}
hf := tlsinfo.HandshakeFailure
if hf == nil {
hf = func(*tls.Conn, error) {}
}
if len(tlsinfo.CRLFile) > 0 {
prevCheck := check
check = func(ctx context.Context, tlsConn *tls.Conn) error {
if err := prevCheck(ctx, tlsConn); err != nil {
return err
}
st := tlsConn.ConnectionState()
if certs := st.PeerCertificates; len(certs) > 0 {
return checkCRL(tlsinfo.CRLFile, certs)
}
return nil
}
}
tlsl := &tlsListener{
Listener: tls.NewListener(l, tlscfg),
connc: make(chan net.Conn),
donec: make(chan struct{}),
handshakeFailure: hf,
check: check,
}
go tlsl.acceptLoop()
return tlsl, nil
}
func (l *tlsListener) Accept() (net.Conn, error) {
select {
case conn := <-l.connc:
return conn, nil
case <-l.donec:
return nil, l.err
}
}
func checkSAN(ctx context.Context, tlsConn *tls.Conn) error {
st := tlsConn.ConnectionState()
if certs := st.PeerCertificates; len(certs) > 0 {
addr := tlsConn.RemoteAddr().String()
return checkCertSAN(ctx, certs[0], addr)
}
return nil
}
// acceptLoop launches each TLS handshake in a separate goroutine
// to prevent a hanging TLS connection from blocking other connections.
func (l *tlsListener) acceptLoop() {
var wg sync.WaitGroup
var pendingMu sync.Mutex
pending := make(map[net.Conn]struct{})
ctx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
pendingMu.Lock()
for c := range pending {
c.Close()
}
pendingMu.Unlock()
wg.Wait()
close(l.donec)
}()
for {
conn, err := l.Listener.Accept()
if err != nil {
l.err = err
return
}
pendingMu.Lock()
pending[conn] = struct{}{}
pendingMu.Unlock()
wg.Add(1)
go func() {
defer func() {
if conn != nil {
conn.Close()
}
wg.Done()
}()
tlsConn := conn.(*tls.Conn)
herr := tlsConn.Handshake()
pendingMu.Lock()
delete(pending, conn)
pendingMu.Unlock()
if herr != nil {
l.handshakeFailure(tlsConn, herr)
return
}
if err := l.check(ctx, tlsConn); err != nil {
l.handshakeFailure(tlsConn, err)
return
}
select {
case l.connc <- tlsConn:
conn = nil
case <-ctx.Done():
}
}()
}
}
func checkCRL(crlPath string, cert []*x509.Certificate) error {
// TODO: cache
crlBytes, err := ioutil.ReadFile(crlPath)
if err != nil {
return err
}
certList, err := x509.ParseCRL(crlBytes)
if err != nil {
return err
}
revokedSerials := make(map[string]struct{})
for _, rc := range certList.TBSCertList.RevokedCertificates {
revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
}
for _, c := range cert {
serial := string(c.SerialNumber.Bytes())
if _, ok := revokedSerials[serial]; ok {
return fmt.Errorf("transport: certificate serial %x revoked", serial)
}
}
return nil
}
func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 {
return nil
}
h, _, herr := net.SplitHostPort(remoteAddr)
if herr != nil {
return herr
}
if len(cert.IPAddresses) > 0 {
cerr := cert.VerifyHostname(h)
if cerr == nil {
return nil
}
if len(cert.DNSNames) == 0 {
return cerr
}
}
if len(cert.DNSNames) > 0 {
ok, err := isHostInDNS(ctx, h, cert.DNSNames)
if ok {
return nil
}
errStr := ""
if err != nil {
errStr = " (" + err.Error() + ")"
}
return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
}
return nil
}
func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
// reverse lookup
wildcards, names := []string{}, []string{}
for _, dns := range dnsNames {
if strings.HasPrefix(dns, "*.") {
wildcards = append(wildcards, dns[1:])
} else {
names = append(names, dns)
}
}
lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
for _, name := range lnames {
// strip trailing '.' from PTR record
if name[len(name)-1] == '.' {
name = name[:len(name)-1]
}
for _, wc := range wildcards {
if strings.HasSuffix(name, wc) {
return true, nil
}
}
for _, n := range names {
if n == name {
return true, nil
}
}
}
err = lerr
// forward lookup
for _, dns := range names {
addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
if lerr != nil {
err = lerr
continue
}
for _, addr := range addrs {
if addr == host {
return true, nil
}
}
}
return false, err
}
func (l *tlsListener) Close() error {
err := l.Listener.Close()
<-l.donec
return err
}

View File

@@ -1,44 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"time"
)
type timeoutConn struct {
net.Conn
wtimeoutd time.Duration
rdtimeoutd time.Duration
}
func (c timeoutConn) Write(b []byte) (n int, err error) {
if c.wtimeoutd > 0 {
if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil {
return 0, err
}
}
return c.Conn.Write(b)
}
func (c timeoutConn) Read(b []byte) (n int, err error) {
if c.rdtimeoutd > 0 {
if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil {
return 0, err
}
}
return c.Conn.Read(b)
}

View File

@@ -1,36 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"time"
)
type rwTimeoutDialer struct {
wtimeoutd time.Duration
rdtimeoutd time.Duration
net.Dialer
}
func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
conn, err := d.Dialer.Dial(network, address)
tconn := &timeoutConn{
rdtimeoutd: d.rdtimeoutd,
wtimeoutd: d.wtimeoutd,
Conn: conn,
}
return tconn, err
}

View File

@@ -1,57 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"time"
)
// NewTimeoutListener returns a listener that listens on the given address.
// If read/write on the accepted connection blocks longer than its time limit,
// it will return timeout error.
func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
ln, err := newListener(addr, scheme)
if err != nil {
return nil, err
}
ln = &rwTimeoutListener{
Listener: ln,
rdtimeoutd: rdtimeoutd,
wtimeoutd: wtimeoutd,
}
if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil {
return nil, err
}
return ln, nil
}
type rwTimeoutListener struct {
net.Listener
wtimeoutd time.Duration
rdtimeoutd time.Duration
}
func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
c, err := rwln.Listener.Accept()
if err != nil {
return nil, err
}
return timeoutConn{
Conn: c,
wtimeoutd: rwln.wtimeoutd,
rdtimeoutd: rwln.rdtimeoutd,
}, nil
}

View File

@@ -1,51 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"net/http"
"time"
)
// NewTimeoutTransport returns a transport created using the given TLS info.
// If read/write on the created connection blocks longer than its time limit,
// it will return timeout error.
// If read/write timeout is set, transport will not be able to reuse connection.
func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
tr, err := NewTransport(info, dialtimeoutd)
if err != nil {
return nil, err
}
if rdtimeoutd != 0 || wtimeoutd != 0 {
// the timed out connection will timeout soon after it is idle.
// it should not be put back to http transport as an idle connection for future usage.
tr.MaxIdleConnsPerHost = -1
} else {
// allow more idle connections between peers to avoid unnecessary port allocation.
tr.MaxIdleConnsPerHost = 1024
}
tr.Dial = (&rwTimeoutDialer{
Dialer: net.Dialer{
Timeout: dialtimeoutd,
KeepAlive: 30 * time.Second,
},
rdtimeoutd: rdtimeoutd,
wtimeoutd: wtimeoutd,
}).Dial
return tr, nil
}

View File

@@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"fmt"
"strings"
"time"
)
// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those
// endpoints that could be validated as secure.
func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
t, err := NewTransport(tlsInfo, 5*time.Second)
if err != nil {
return nil, err
}
var errs []string
var endpoints []string
for _, ep := range eps {
if !strings.HasPrefix(ep, "https://") {
errs = append(errs, fmt.Sprintf("%q is insecure", ep))
continue
}
conn, cerr := t.Dial("tcp", ep[len("https://"):])
if cerr != nil {
errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
continue
}
conn.Close()
endpoints = append(endpoints, ep)
}
if len(errs) != 0 {
err = fmt.Errorf("%s", strings.Join(errs, ","))
}
return endpoints, err
}

View File

@@ -1,71 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"net/http"
"strings"
"time"
)
type unixTransport struct{ *http.Transport }
func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
cfg, err := info.ClientConfig()
if err != nil {
return nil, err
}
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: dialtimeoutd,
// value taken from http.DefaultTransport
KeepAlive: 30 * time.Second,
}).Dial,
// value taken from http.DefaultTransport
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
}
dialer := (&net.Dialer{
Timeout: dialtimeoutd,
KeepAlive: 30 * time.Second,
})
dial := func(net, addr string) (net.Conn, error) {
return dialer.Dial("unix", addr)
}
tu := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
}
ut := &unixTransport{tu}
t.RegisterProtocol("unix", ut)
t.RegisterProtocol("unixs", ut)
return t, nil
}
func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
url := *req.URL
req.URL = &url
req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
return urt.Transport.RoundTrip(req)
}

View File

@@ -1,40 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"os"
)
type unixListener struct{ net.Listener }
func NewUnixListener(addr string) (net.Listener, error) {
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
return nil, err
}
l, err := net.Listen("unix", addr)
if err != nil {
return nil, err
}
return &unixListener{l}, nil
}
func (ul *unixListener) Close() error {
if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) {
return err
}
return ul.Listener.Close()
}

View File

@@ -63,8 +63,8 @@ type GlobalRoleBinding struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Subject rbacv1.Subject `json:"subject,omitempty"`
-	GlobalRoleName string `json:"globalRoleName,omitempty" norman:"type=reference[globalRole]"`
+	UserName       string `json:"userName,omitempty" norman:"required,type=reference[user]"`
+	GlobalRoleName string `json:"globalRoleName,omitempty" norman:"required,type=reference[globalRole]"`
 }
 type RoleTemplate struct {
@@ -94,10 +94,9 @@ type ProjectRoleTemplateBinding struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Subject rbacv1.Subject `json:"subject,omitempty"`
-	ProjectName      string `json:"projectName,omitempty" norman:"type=reference[project]"`
-	RoleTemplateName string `json:"roleTemplateName,omitempty" norman:"type=reference[roleTemplate]"`
+	UserName         string `json:"userName,omitempty" norman:"required,type=reference[user]"`
+	ProjectName      string `json:"projectName,omitempty" norman:"required,type=reference[project]"`
+	RoleTemplateName string `json:"roleTemplateName,omitempty" norman:"required,type=reference[roleTemplate]"`
 }
 type ClusterRoleTemplateBinding struct {
@@ -105,8 +104,7 @@ type ClusterRoleTemplateBinding struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Subject rbacv1.Subject `json:"subject,omitempty"`
-	ClusterName      string `json:"clusterName,omitempty" norman:"type=reference[cluster]"`
-	RoleTemplateName string `json:"roleTemplateName,omitempty" norman:"type=reference[roleTemplate]"`
+	UserName         string `json:"userName,omitempty" norman:"required,type=reference[user]"`
+	ClusterName      string `json:"clusterName,omitempty" norman:"required,type=reference[cluster]"`
+	RoleTemplateName string `json:"roleTemplateName,omitempty" norman:"required,type=reference[roleTemplate]"`
 }

View File

@@ -17,6 +17,7 @@ const (
 	ClusterConditionProvisioned condition.Cond = "Provisioned"
 	ClusterConditionUpdated     condition.Cond = "Updated"
 	ClusterConditionRemoved     condition.Cond = "Removed"
+	ClusterConditionRegistered  condition.Cond = "Registered"
 	// ClusterConditionNoDiskPressure true when all cluster nodes have sufficient disk
 	ClusterConditionNoDiskPressure condition.Cond = "NoDiskPressure"
 	// ClusterConditionNoMemoryPressure true when all cluster nodes have sufficient memory
@@ -46,8 +47,8 @@ type ClusterSpec struct {
 	DisplayName string `json:"displayName"`
 	Description string `json:"description"`
 	Internal    bool   `json:"internal" norman:"nocreate,noupdate"`
-	Embedded       bool             `json:"embedded"`
-	EmbeddedConfig *K8sServerConfig `json:"embeddedConfig"`
+	ImportedConfig *ImportedConfig  `json:"importedConfig" norman:"noupdate"`
+	EmbeddedConfig *K8sServerConfig `json:"embeddedConfig" norman:"noupdate"`
 	GoogleKubernetesEngineConfig  *GoogleKubernetesEngineConfig  `json:"googleKubernetesEngineConfig,omitempty"`
 	AzureKubernetesServiceConfig  *AzureKubernetesServiceConfig  `json:"azureKubernetesServiceConfig,omitempty"`
 	RancherKubernetesEngineConfig *RancherKubernetesEngineConfig `json:"rancherKubernetesEngineConfig,omitempty"`
@@ -55,6 +56,10 @@ type ClusterSpec struct {
 	DefaultClusterRoleForProjectMembers string `json:"defaultClusterRoleForProjectMembers,omitempty" norman:"type=reference[roleTemplate]"`
 }
+type ImportedConfig struct {
+	KubeConfig string `json:"kubeConfig"`
+}
 type K8sServerConfig struct {
 	AdmissionControllers []string `json:"admissionControllers,omitempty"`
 	ServiceNetCIDR       string   `json:"serviceNetCidr,omitempty"`
@@ -171,7 +176,7 @@ type ClusterRegistrationToken struct {
 }
 type ClusterRegistrationTokenSpec struct {
-	ClusterName string `json:"clusterName" norman:"type=reference[cluster]"`
+	ClusterName string `json:"clusterName" norman:"required,type=reference[cluster]"`
 }
 type ClusterRegistrationTokenStatus struct {

View File

@@ -0,0 +1,43 @@
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Setting struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Value string `json:"value" norman:"required"`
Default string `json:"default" norman:"nocreate,noupdate"`
Customized bool `json:"customized" norman:"nocreate,noupdate"`
}
type ListenConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
DisplayName string `json:"displayName,omitempty"`
Description string `json:"description,omitempty"`
Mode string `json:"mode,omitempty" norman:"type=enum,options=https|http|acme"`
CACerts string `json:"caCerts,omitempty"`
CACert string `json:"caCert,omitempty"`
CAKey string `json:"caKey,omitempty"`
Cert string `json:"cert,omitempty"`
Key string `json:"key,omitempty" norman:"writeOnly"`
Domains []string `json:"domains,omitempty"`
TOS []string `json:"tos,omitempty" norman:"default=auto"`
KnownIPs []string `json:"knownIps" norman:"nocreate,noupdate"`
Enabled bool `json:"enabled,omitempty" norman:"default=true"`
CertFingerprint string `json:"certFingerprint,omitempty" norman:"nocreate,noupdate"`
CN string `json:"cn,omitempty" norman:"nocreate,noupdate"`
Version int `json:"version,omitempty" norman:"nocreate,noupdate"`
ExpiresAt string `json:"expiresAt,omitempty" norman:"nocreate,noupdate"`
Issuer string `json:"issuer,omitempty" norman:"nocreate,noupdate"`
IssuedAt string `json:"issuedAt,omitempty" norman:"nocreate,noupdate"`
Algorithm string `json:"algorithm,omitempty" norman:"nocreate,noupdate"`
SerialNumber string `json:"serialNumber,omitempty" norman:"nocreate,noupdate"`
KeySize int `json:"keySize,omitempty" norman:"nocreate,noupdate"`
SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty" norman:"nocreate,noupdate"`
}
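A hedged sketch of an ACME-mode ListenConfig follows; the function name and all field values are placeholders, and only fields defined in the struct above are used.
package v3
// exampleListenConfig is illustrative only: an ACME-mode listener filled with
// placeholder values ("auto" matches the default shown in the TOS tag above).
func exampleListenConfig() *ListenConfig {
	return &ListenConfig{
		DisplayName: "public-endpoint",
		Mode:        "acme",
		Domains:     []string{"rancher.example.com"},
		TOS:         []string{"auto"},
		Enabled:     true,
	}
}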

View File

@ -0,0 +1,133 @@
package v3
import (
"github.com/rancher/norman/condition"
"github.com/rancher/norman/types"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type ClusterLogging struct {
types.Namespaced
metav1.TypeMeta `json:",inline"`
// Standard object's metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec ClusterLoggingSpec `json:"spec"`
// Most recent observed status of the cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status LoggingStatus `json:"status"`
}
type ProjectLogging struct {
types.Namespaced
metav1.TypeMeta `json:",inline"`
// Standard object's metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec ProjectLoggingSpec `json:"spec"`
// Most recent observed status of the cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status LoggingStatus `json:"status"`
}
type LoggingCommonSpec struct {
DisplayName string `json:"displayName,omitempty"`
OutputFlushInterval int `json:"outputFlushInterval"`
OutputTags map[string]string `json:"outputTags"`
ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
}
type ClusterLoggingSpec struct {
LoggingCommonSpec
ClusterName string `json:"clusterName" norman:"type=reference[cluster]"`
EmbeddedConfig *EmbeddedConfig `json:"embeddedConfig,omitempty"`
}
type ProjectLoggingSpec struct {
LoggingCommonSpec
ProjectName string `json:"projectName" norman:"type=reference[project]"`
}
type LoggingStatus struct {
Conditions []LoggingCondition `json:"conditions,omitempty"`
}
var (
ClusterLoggingConditionInitialized condition.Cond = "Initialized"
ClusterLoggingConditionProvisioned condition.Cond = "Provisioned"
)
type LoggingCondition struct {
// Type of cluster condition.
Type condition.Cond `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status"`
// The last time this condition was updated.
LastUpdateTime string `json:"lastUpdateTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human-readable message indicating details about last transition
Message string `json:"message,omitempty"`
}
type ElasticsearchConfig struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
IndexPrefix string `json:"indexPrefix,omitempty"`
DateFormat string `json:"dateFormat,omitempty"`
AuthUserName string `json:"authUsername,omitempty"` //secret
AuthPassword string `json:"authPassword,omitempty"` //secret
}
type SplunkConfig struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Protocol string `json:"protocol,omitempty"`
Source string `json:"source,omitempty"`
Token string `json:"token,omitempty"` //secret
}
type EmbeddedConfig struct {
IndexPrefix string `json:"indexPrefix,omitempty"`
DateFormat string `json:"dateFormat,omitempty"`
}
type KafkaConfig struct {
Zookeeper *Zookeeper `json:"zookeeper,omitempty"`
Broker *BrokerList `json:"broker,omitempty"`
Topic string `json:"topic,omitempty"`
DataType string `json:"dataType,omitempty"`
MaxSendRetries int `json:"maxSendRetries,omitempty"`
}
type Zookeeper struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
}
type BrokerList struct {
BrokerList []string `json:"brokerList,omitempty"`
}
type SyslogConfig struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Severity string `json:"severity,omitempty"`
Program string `json:"program,omitempty"`
}
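To show how these logging types compose, here is a hedged sketch of a ClusterLoggingSpec targeting Elasticsearch; the function name, host, port, and tag values are placeholders, not part of this change.
package v3
// exampleClusterLoggingSpec is illustrative only: it wires an Elasticsearch target
// into the common spec and scopes it to a cluster, using only fields defined above.
func exampleClusterLoggingSpec(clusterName string) ClusterLoggingSpec {
	return ClusterLoggingSpec{
		LoggingCommonSpec: LoggingCommonSpec{
			DisplayName:         "cluster-logs",
			OutputFlushInterval: 10,
			OutputTags:          map[string]string{"env": "dev"},
			ElasticsearchConfig: &ElasticsearchConfig{
				Host:        "es.example.internal",
				Port:        9200,
				IndexPrefix: "rancher",
			},
		},
		ClusterName: clusterName,
	}
}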

View File

@ -72,6 +72,7 @@ type MachineStatus struct {
NodeAnnotations map[string]string `json:"nodeAnnotations,omitempty"` NodeAnnotations map[string]string `json:"nodeAnnotations,omitempty"`
NodeLabels map[string]string `json:"nodeLabels,omitempty"` NodeLabels map[string]string `json:"nodeLabels,omitempty"`
Token string `json:"token"` Token string `json:"token"`
NodeTaints []v1.Taint `json:"nodeTaints,omitempty"`
} }
var ( var (
@ -102,15 +103,29 @@ type MachineConfig struct {
Annotations map[string]string `json:"annotations"` Annotations map[string]string `json:"annotations"`
} }
type CustomConfig struct {
// IP or FQDN that is fully resolvable and used for SSH communication
Address string `yaml:"address" json:"address,omitempty"`
// Optional - Internal address that will be used for communication between components
InternalAddress string `yaml:"internal_address" json:"internalAddress,omitempty"`
// SSH user that will be used by RKE
User string `yaml:"user" json:"user,omitempty"`
// Optional - Docker socket on the node that will be used in tunneling
DockerSocket string `yaml:"docker_socket" json:"dockerSocket,omitempty"`
// SSH Private Key
SSHKey string `yaml:"ssh_key" json:"sshKey,omitempty"`
}
type MachineSpec struct { type MachineSpec struct {
NodeSpec v1.NodeSpec `json:"nodeSpec"` NodeSpec v1.NodeSpec `json:"nodeSpec"`
Description string `json:"description,omitempty"` CustomConfig *CustomConfig `json:"customConfig"`
DisplayName string `json:"displayName"` Description string `json:"description,omitempty"`
RequestedHostname string `json:"requestedHostname,omitempty" norman:"noupdate"` DisplayName string `json:"displayName"`
ClusterName string `json:"clusterName,omitempty" norman:"type=reference[cluster],noupdate,required"` RequestedHostname string `json:"requestedHostname,omitempty" norman:"type=dnsLabel,nullable,noupdate"`
Role []string `json:"role,omitempty" norman:"noupdate,type=array[enum],options=etcd|worker|controlplane"` ClusterName string `json:"clusterName,omitempty" norman:"type=reference[cluster],noupdate,required"`
MachineTemplateName string `json:"machineTemplateName,omitempty" norman:"type=reference[machineTemplate],noupdate"` Role []string `json:"role,omitempty" norman:"noupdate,type=array[enum],options=etcd|worker|controlplane"`
UseInternalIPAddress bool `json:"useInternalIpAddress,omitempty" norman:"default=true,noupdate"` MachineTemplateName string `json:"machineTemplateName,omitempty" norman:"type=reference[machineTemplate],noupdate"`
UseInternalIPAddress bool `json:"useInternalIpAddress,omitempty" norman:"default=true,noupdate"`
} }
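Below is a hedged sketch of a MachineSpec for a pre-provisioned node using the new CustomConfig field; the function name, addresses, and cluster name are placeholders (the SSHKey field is left unset here).
package v3
// exampleCustomMachineSpec is illustrative only: a worker node registered over SSH
// via the new CustomConfig field, using only fields defined in MachineSpec above.
func exampleCustomMachineSpec() MachineSpec {
	return MachineSpec{
		CustomConfig: &CustomConfig{
			Address:         "203.0.113.10",
			InternalAddress: "10.0.0.10",
			User:            "rancher",
			DockerSocket:    "/var/run/docker.sock",
		},
		RequestedHostname:    "worker-0",
		ClusterName:          "c-example",
		Role:                 []string{"worker"},
		UseInternalIPAddress: true,
	}
}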
type MachineCommonParams struct { type MachineCommonParams struct {

View File

@ -12,13 +12,49 @@ type RancherKubernetesEngineConfig struct {
// YAML manifest for user provided addons to be deployed on the cluster // YAML manifest for user provided addons to be deployed on the cluster
Addons string `yaml:"addons" json:"addons,omitempty"` Addons string `yaml:"addons" json:"addons,omitempty"`
// List of images used internally for proxy, cert download and kubedns // List of images used internally for proxy, cert download and kubedns
SystemImages map[string]string `yaml:"system_images" json:"systemImages,omitempty"` SystemImages RKESystemImages `yaml:"system_images" json:"systemImages,omitempty"`
// SSH Private Key Path // SSH Private Key Path
SSHKeyPath string `yaml:"ssh_key_path" json:"sshKeyPath,omitempty"` SSHKeyPath string `yaml:"ssh_key_path" json:"sshKeyPath,omitempty"`
// Authorization mode configuration used in the cluster // Authorization mode configuration used in the cluster
Authorization AuthzConfig `yaml:"authorization" json:"authorization,omitempty"` Authorization AuthzConfig `yaml:"authorization" json:"authorization,omitempty"`
// Enable/disable strict docker version checking // Enable/disable strict docker version checking
IgnoreDockerVersion bool `yaml:"ignore_docker_version" json:"ignoreDockerVersion"` IgnoreDockerVersion bool `yaml:"ignore_docker_version" json:"ignoreDockerVersion"`
// Kubernetes version to use (if kubernetes image is specified, image version takes precedence)
Version string `yaml:"kubernetes_version" json:"kubernetesVersion,omitempty"`
// List of private registries and their credentials
PrivateRegistries []PrivateRegistry `yaml:"private_registries" json:"privateRegistries,omitempty"`
}
type PrivateRegistry struct {
// URL for the registry
URL string `yaml:"url" json:"url,omitempty"`
// User name for registry access
User string `yaml:"user" json:"user,omitempty"`
// Password for registry access
Password string `yaml:"password" json:"password,omitempty"`
}
type RKESystemImages struct {
// etcd image
Etcd string `yaml:"etcd" json:"etcd,omitempty" norman:"default=quay.io/coreos/etcd:latest"`
// Alpine image
Alpine string `yaml:"alpine" json:"alpine,omitempty" norman:"default=alpine"`
// rke-nginx-proxy image
NginxProxy string `yaml:"nginx_proxy" json:"nginxProxy,omitempty" norman:"default=rancher/rke-nginx-proxy"`
// rke-cert-deployer image
CertDownloader string `yaml:"cert_downloader" json:"certDownloader,omitempty" norman:"default=rancher/rke-cert-deployer"`
// rke-service-sidekick image
KubernetesServicesSidecar string `yaml:"kubernetes_services_sidecar" json:"kubernetesServicesSidecar,omitempty" norman:"default=rancher/rke-kube-services-sidecar"`
// KubeDNS image
KubeDNS string `yaml:"kubedns" json:"kubedns,omitempty" norman:"default=gcr.io/google_containers/k8s-dns-kube-dns-amd64"`
// DNSMasq image
DNSmasq string `yaml:"dnsmasq" json:"dnsmasq,omitempty" norman:"default=gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"`
// KubeDNS sidecar image
KubeDNSSidecar string `yaml:"kubedns_sidecar" json:"kubednsSidecar,omitempty" norman:"default=gcr.io/google_containers/k8s-dns-sidecar-amd64"`
// KubeDNS autoscaler image
KubeDNSAutoscaler string `yaml:"kubedns_autoscaler" json:"kubednsAutoscaler,omitempty" norman:"default=gcr.io/google_containers/cluster-proportional-autoscaler-amd64"`
// Kubernetes image
Kubernetes string `yaml:"kubernetes" json:"kubernetes,omitempty" norman:"default=rancher/k8s"`
} }
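To tie the new fields together, a hedged sketch of a RancherKubernetesEngineConfig using the structured SystemImages type, a version string, and a private registry; the function name, image tags, version, and credentials are placeholders rather than recommended values.
package v3
// exampleRKEConfig is illustrative only: it sets a few of the new structured
// SystemImages fields alongside Version and PrivateRegistries. Unset image fields
// are simply empty strings here; any defaulting comes from the norman tags above
// when the object is created through the API, not from Go itself.
func exampleRKEConfig() RancherKubernetesEngineConfig {
	return RancherKubernetesEngineConfig{
		Version: "v1.8.x-example",
		SystemImages: RKESystemImages{
			Etcd:       "quay.io/coreos/etcd:latest",
			Alpine:     "alpine",
			Kubernetes: "rancher/k8s:example-tag",
			KubeDNS:    "gcr.io/google_containers/k8s-dns-kube-dns-amd64:example-tag",
		},
		PrivateRegistries: []PrivateRegistry{
			{URL: "registry.example.com", User: "deploy", Password: "example-password"},
		},
	}
}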
type RKEConfigNode struct { type RKEConfigNode struct {

View File

@ -0,0 +1,252 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
ClusterLoggingGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "ClusterLogging",
}
ClusterLoggingResource = metav1.APIResource{
Name: "clusterloggings",
SingularName: "clusterlogging",
Namespaced: true,
Kind: ClusterLoggingGroupVersionKind.Kind,
}
)
type ClusterLoggingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterLogging
}
type ClusterLoggingHandlerFunc func(key string, obj *ClusterLogging) error
type ClusterLoggingLister interface {
List(namespace string, selector labels.Selector) (ret []*ClusterLogging, err error)
Get(namespace, name string) (*ClusterLogging, error)
}
type ClusterLoggingController interface {
Informer() cache.SharedIndexInformer
Lister() ClusterLoggingLister
AddHandler(name string, handler ClusterLoggingHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler ClusterLoggingHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type ClusterLoggingInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*ClusterLogging) (*ClusterLogging, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ClusterLogging, error)
Get(name string, opts metav1.GetOptions) (*ClusterLogging, error)
Update(*ClusterLogging) (*ClusterLogging, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*ClusterLoggingList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() ClusterLoggingController
AddHandler(name string, sync ClusterLoggingHandlerFunc)
AddLifecycle(name string, lifecycle ClusterLoggingLifecycle)
AddClusterScopedHandler(name, clusterName string, sync ClusterLoggingHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle ClusterLoggingLifecycle)
}
type clusterLoggingLister struct {
controller *clusterLoggingController
}
func (l *clusterLoggingLister) List(namespace string, selector labels.Selector) (ret []*ClusterLogging, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*ClusterLogging))
})
return
}
func (l *clusterLoggingLister) Get(namespace, name string) (*ClusterLogging, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: ClusterLoggingGroupVersionKind.Group,
Resource: "clusterLogging",
}, name)
}
return obj.(*ClusterLogging), nil
}
type clusterLoggingController struct {
controller.GenericController
}
func (c *clusterLoggingController) Lister() ClusterLoggingLister {
return &clusterLoggingLister{
controller: c,
}
}
func (c *clusterLoggingController) AddHandler(name string, handler ClusterLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*ClusterLogging))
})
}
func (c *clusterLoggingController) AddClusterScopedHandler(name, cluster string, handler ClusterLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*ClusterLogging))
})
}
type clusterLoggingFactory struct {
}
func (c clusterLoggingFactory) Object() runtime.Object {
return &ClusterLogging{}
}
func (c clusterLoggingFactory) List() runtime.Object {
return &ClusterLoggingList{}
}
func (s *clusterLoggingClient) Controller() ClusterLoggingController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.clusterLoggingControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(ClusterLoggingGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &clusterLoggingController{
GenericController: genericController,
}
s.client.clusterLoggingControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type clusterLoggingClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller ClusterLoggingController
}
func (s *clusterLoggingClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *clusterLoggingClient) Create(o *ClusterLogging) (*ClusterLogging, error) {
obj, err := s.objectClient.Create(o)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Get(name string, opts metav1.GetOptions) (*ClusterLogging, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ClusterLogging, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Update(o *ClusterLogging) (*ClusterLogging, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *clusterLoggingClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *clusterLoggingClient) List(opts metav1.ListOptions) (*ClusterLoggingList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*ClusterLoggingList), err
}
func (s *clusterLoggingClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched deployment.
func (s *clusterLoggingClient) Patch(o *ClusterLogging, data []byte, subresources ...string) (*ClusterLogging, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *clusterLoggingClient) AddHandler(name string, sync ClusterLoggingHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *clusterLoggingClient) AddLifecycle(name string, lifecycle ClusterLoggingLifecycle) {
sync := NewClusterLoggingLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *clusterLoggingClient) AddClusterScopedHandler(name, clusterName string, sync ClusterLoggingHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *clusterLoggingClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle ClusterLoggingLifecycle) {
sync := NewClusterLoggingLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type ClusterLoggingLifecycle interface {
Create(obj *ClusterLogging) (*ClusterLogging, error)
Remove(obj *ClusterLogging) (*ClusterLogging, error)
Updated(obj *ClusterLogging) (*ClusterLogging, error)
}
type clusterLoggingLifecycleAdapter struct {
lifecycle ClusterLoggingLifecycle
}
func (w *clusterLoggingLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *clusterLoggingLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *clusterLoggingLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func NewClusterLoggingLifecycleAdapter(name string, clusterScoped bool, client ClusterLoggingInterface, l ClusterLoggingLifecycle) ClusterLoggingHandlerFunc {
adapter := &clusterLoggingLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *ClusterLogging) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}
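For orientation, a hedged sketch of how the adapter above would typically be used: a no-op lifecycle implementation registered through the generated client interface. The type and function names are hypothetical, and the ClusterLoggingInterface value is assumed to come from a client constructed elsewhere (e.g. via NewForConfig).
package v3
// exampleLoggingLifecycle is illustrative only: it satisfies ClusterLoggingLifecycle
// without doing any work.
type exampleLoggingLifecycle struct{}

func (e *exampleLoggingLifecycle) Create(obj *ClusterLogging) (*ClusterLogging, error)  { return obj, nil }
func (e *exampleLoggingLifecycle) Updated(obj *ClusterLogging) (*ClusterLogging, error) { return obj, nil }
func (e *exampleLoggingLifecycle) Remove(obj *ClusterLogging) (*ClusterLogging, error)  { return obj, nil }

// registerExampleLifecycle wraps the lifecycle in a handler via AddLifecycle, which
// internally builds the adapter defined above and attaches it to the controller.
func registerExampleLifecycle(loggings ClusterLoggingInterface) {
	loggings.AddLifecycle("example-logging-lifecycle", &exampleLoggingLifecycle{})
}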

View File

@ -39,6 +39,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*BaseService).DeepCopyInto(out.(*BaseService)) in.(*BaseService).DeepCopyInto(out.(*BaseService))
return nil return nil
}, InType: reflect.TypeOf(&BaseService{})}, }, InType: reflect.TypeOf(&BaseService{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*BrokerList).DeepCopyInto(out.(*BrokerList))
return nil
}, InType: reflect.TypeOf(&BrokerList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Catalog).DeepCopyInto(out.(*Catalog)) in.(*Catalog).DeepCopyInto(out.(*Catalog))
return nil return nil
@ -83,6 +87,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ClusterList).DeepCopyInto(out.(*ClusterList)) in.(*ClusterList).DeepCopyInto(out.(*ClusterList))
return nil return nil
}, InType: reflect.TypeOf(&ClusterList{})}, }, InType: reflect.TypeOf(&ClusterList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLogging).DeepCopyInto(out.(*ClusterLogging))
return nil
}, InType: reflect.TypeOf(&ClusterLogging{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLoggingList).DeepCopyInto(out.(*ClusterLoggingList))
return nil
}, InType: reflect.TypeOf(&ClusterLoggingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLoggingSpec).DeepCopyInto(out.(*ClusterLoggingSpec))
return nil
}, InType: reflect.TypeOf(&ClusterLoggingSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterRegistrationToken).DeepCopyInto(out.(*ClusterRegistrationToken)) in.(*ClusterRegistrationToken).DeepCopyInto(out.(*ClusterRegistrationToken))
return nil return nil
@ -115,6 +131,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ClusterStatus).DeepCopyInto(out.(*ClusterStatus)) in.(*ClusterStatus).DeepCopyInto(out.(*ClusterStatus))
return nil return nil
}, InType: reflect.TypeOf(&ClusterStatus{})}, }, InType: reflect.TypeOf(&ClusterStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomConfig).DeepCopyInto(out.(*CustomConfig))
return nil
}, InType: reflect.TypeOf(&CustomConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DynamicSchema).DeepCopyInto(out.(*DynamicSchema)) in.(*DynamicSchema).DeepCopyInto(out.(*DynamicSchema))
return nil return nil
@ -135,6 +155,14 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ETCDService).DeepCopyInto(out.(*ETCDService)) in.(*ETCDService).DeepCopyInto(out.(*ETCDService))
return nil return nil
}, InType: reflect.TypeOf(&ETCDService{})}, }, InType: reflect.TypeOf(&ETCDService{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ElasticsearchConfig).DeepCopyInto(out.(*ElasticsearchConfig))
return nil
}, InType: reflect.TypeOf(&ElasticsearchConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*EmbeddedConfig).DeepCopyInto(out.(*EmbeddedConfig))
return nil
}, InType: reflect.TypeOf(&EmbeddedConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Field).DeepCopyInto(out.(*Field)) in.(*Field).DeepCopyInto(out.(*Field))
return nil return nil
@ -187,10 +215,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*GroupMemberList).DeepCopyInto(out.(*GroupMemberList)) in.(*GroupMemberList).DeepCopyInto(out.(*GroupMemberList))
return nil return nil
}, InType: reflect.TypeOf(&GroupMemberList{})}, }, InType: reflect.TypeOf(&GroupMemberList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ImportedConfig).DeepCopyInto(out.(*ImportedConfig))
return nil
}, InType: reflect.TypeOf(&ImportedConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*K8sServerConfig).DeepCopyInto(out.(*K8sServerConfig)) in.(*K8sServerConfig).DeepCopyInto(out.(*K8sServerConfig))
return nil return nil
}, InType: reflect.TypeOf(&K8sServerConfig{})}, }, InType: reflect.TypeOf(&K8sServerConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*KafkaConfig).DeepCopyInto(out.(*KafkaConfig))
return nil
}, InType: reflect.TypeOf(&KafkaConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*KubeAPIService).DeepCopyInto(out.(*KubeAPIService)) in.(*KubeAPIService).DeepCopyInto(out.(*KubeAPIService))
return nil return nil
@ -211,10 +247,30 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ListOpts).DeepCopyInto(out.(*ListOpts)) in.(*ListOpts).DeepCopyInto(out.(*ListOpts))
return nil return nil
}, InType: reflect.TypeOf(&ListOpts{})}, }, InType: reflect.TypeOf(&ListOpts{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ListenConfig).DeepCopyInto(out.(*ListenConfig))
return nil
}, InType: reflect.TypeOf(&ListenConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ListenConfigList).DeepCopyInto(out.(*ListenConfigList))
return nil
}, InType: reflect.TypeOf(&ListenConfigList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LocalCredential).DeepCopyInto(out.(*LocalCredential)) in.(*LocalCredential).DeepCopyInto(out.(*LocalCredential))
return nil return nil
}, InType: reflect.TypeOf(&LocalCredential{})}, }, InType: reflect.TypeOf(&LocalCredential{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingCommonSpec).DeepCopyInto(out.(*LoggingCommonSpec))
return nil
}, InType: reflect.TypeOf(&LoggingCommonSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingCondition).DeepCopyInto(out.(*LoggingCondition))
return nil
}, InType: reflect.TypeOf(&LoggingCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingStatus).DeepCopyInto(out.(*LoggingStatus))
return nil
}, InType: reflect.TypeOf(&LoggingStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoginInput).DeepCopyInto(out.(*LoginInput)) in.(*LoginInput).DeepCopyInto(out.(*LoginInput))
return nil return nil
@ -315,6 +371,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*PrincipalList).DeepCopyInto(out.(*PrincipalList)) in.(*PrincipalList).DeepCopyInto(out.(*PrincipalList))
return nil return nil
}, InType: reflect.TypeOf(&PrincipalList{})}, }, InType: reflect.TypeOf(&PrincipalList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PrivateRegistry).DeepCopyInto(out.(*PrivateRegistry))
return nil
}, InType: reflect.TypeOf(&PrivateRegistry{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Project).DeepCopyInto(out.(*Project)) in.(*Project).DeepCopyInto(out.(*Project))
return nil return nil
@ -327,6 +387,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ProjectList).DeepCopyInto(out.(*ProjectList)) in.(*ProjectList).DeepCopyInto(out.(*ProjectList))
return nil return nil
}, InType: reflect.TypeOf(&ProjectList{})}, }, InType: reflect.TypeOf(&ProjectList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLogging).DeepCopyInto(out.(*ProjectLogging))
return nil
}, InType: reflect.TypeOf(&ProjectLogging{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLoggingList).DeepCopyInto(out.(*ProjectLoggingList))
return nil
}, InType: reflect.TypeOf(&ProjectLoggingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLoggingSpec).DeepCopyInto(out.(*ProjectLoggingSpec))
return nil
}, InType: reflect.TypeOf(&ProjectLoggingSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectRoleTemplateBinding).DeepCopyInto(out.(*ProjectRoleTemplateBinding)) in.(*ProjectRoleTemplateBinding).DeepCopyInto(out.(*ProjectRoleTemplateBinding))
return nil return nil
@ -355,6 +427,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*RKEConfigServices).DeepCopyInto(out.(*RKEConfigServices)) in.(*RKEConfigServices).DeepCopyInto(out.(*RKEConfigServices))
return nil return nil
}, InType: reflect.TypeOf(&RKEConfigServices{})}, }, InType: reflect.TypeOf(&RKEConfigServices{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RKESystemImages).DeepCopyInto(out.(*RKESystemImages))
return nil
}, InType: reflect.TypeOf(&RKESystemImages{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RancherKubernetesEngineConfig).DeepCopyInto(out.(*RancherKubernetesEngineConfig)) in.(*RancherKubernetesEngineConfig).DeepCopyInto(out.(*RancherKubernetesEngineConfig))
return nil return nil
@ -379,6 +455,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*SetPasswordInput).DeepCopyInto(out.(*SetPasswordInput)) in.(*SetPasswordInput).DeepCopyInto(out.(*SetPasswordInput))
return nil return nil
}, InType: reflect.TypeOf(&SetPasswordInput{})}, }, InType: reflect.TypeOf(&SetPasswordInput{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Setting).DeepCopyInto(out.(*Setting))
return nil
}, InType: reflect.TypeOf(&Setting{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SettingList).DeepCopyInto(out.(*SettingList))
return nil
}, InType: reflect.TypeOf(&SettingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SplunkConfig).DeepCopyInto(out.(*SplunkConfig))
return nil
}, InType: reflect.TypeOf(&SplunkConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Stack).DeepCopyInto(out.(*Stack)) in.(*Stack).DeepCopyInto(out.(*Stack))
return nil return nil
@ -395,6 +483,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*StackStatus).DeepCopyInto(out.(*StackStatus)) in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
return nil return nil
}, InType: reflect.TypeOf(&StackStatus{})}, }, InType: reflect.TypeOf(&StackStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SyslogConfig).DeepCopyInto(out.(*SyslogConfig))
return nil
}, InType: reflect.TypeOf(&SyslogConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Template).DeepCopyInto(out.(*Template)) in.(*Template).DeepCopyInto(out.(*Template))
return nil return nil
@ -447,6 +539,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*Values).DeepCopyInto(out.(*Values)) in.(*Values).DeepCopyInto(out.(*Values))
return nil return nil
}, InType: reflect.TypeOf(&Values{})}, }, InType: reflect.TypeOf(&Values{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Zookeeper).DeepCopyInto(out.(*Zookeeper))
return nil
}, InType: reflect.TypeOf(&Zookeeper{})},
) )
} }
@ -551,6 +647,27 @@ func (in *BaseService) DeepCopy() *BaseService {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BrokerList) DeepCopyInto(out *BrokerList) {
*out = *in
if in.BrokerList != nil {
in, out := &in.BrokerList, &out.BrokerList
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerList.
func (in *BrokerList) DeepCopy() *BrokerList {
if in == nil {
return nil
}
out := new(BrokerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Catalog) DeepCopyInto(out *Catalog) { func (in *Catalog) DeepCopyInto(out *Catalog) {
*out = *in *out = *in
@ -823,6 +940,96 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
} }
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLogging) DeepCopyInto(out *ClusterLogging) {
*out = *in
out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogging.
func (in *ClusterLogging) DeepCopy() *ClusterLogging {
if in == nil {
return nil
}
out := new(ClusterLogging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterLogging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLoggingList) DeepCopyInto(out *ClusterLoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterLogging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingList.
func (in *ClusterLoggingList) DeepCopy() *ClusterLoggingList {
if in == nil {
return nil
}
out := new(ClusterLoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterLoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLoggingSpec) DeepCopyInto(out *ClusterLoggingSpec) {
*out = *in
in.LoggingCommonSpec.DeepCopyInto(&out.LoggingCommonSpec)
if in.EmbeddedConfig != nil {
in, out := &in.EmbeddedConfig, &out.EmbeddedConfig
if *in == nil {
*out = nil
} else {
*out = new(EmbeddedConfig)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingSpec.
func (in *ClusterLoggingSpec) DeepCopy() *ClusterLoggingSpec {
if in == nil {
return nil
}
out := new(ClusterLoggingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRegistrationToken) DeepCopyInto(out *ClusterRegistrationToken) { func (in *ClusterRegistrationToken) DeepCopyInto(out *ClusterRegistrationToken) {
*out = *in *out = *in
@ -925,7 +1132,6 @@ func (in *ClusterRoleTemplateBinding) DeepCopyInto(out *ClusterRoleTemplateBindi
out.Namespaced = in.Namespaced out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Subject = in.Subject
return return
} }
@ -992,6 +1198,15 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*in)[i].DeepCopyInto(&(*out)[i]) (*in)[i].DeepCopyInto(&(*out)[i])
} }
} }
if in.ImportedConfig != nil {
in, out := &in.ImportedConfig, &out.ImportedConfig
if *in == nil {
*out = nil
} else {
*out = new(ImportedConfig)
**out = **in
}
}
if in.EmbeddedConfig != nil { if in.EmbeddedConfig != nil {
in, out := &in.EmbeddedConfig, &out.EmbeddedConfig in, out := &in.EmbeddedConfig, &out.EmbeddedConfig
if *in == nil { if *in == nil {
@ -1098,6 +1313,22 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomConfig) DeepCopyInto(out *CustomConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomConfig.
func (in *CustomConfig) DeepCopy() *CustomConfig {
if in == nil {
return nil
}
out := new(CustomConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicSchema) DeepCopyInto(out *DynamicSchema) { func (in *DynamicSchema) DeepCopyInto(out *DynamicSchema) {
*out = *in *out = *in
@ -1260,6 +1491,38 @@ func (in *ETCDService) DeepCopy() *ETCDService {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchConfig) DeepCopyInto(out *ElasticsearchConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfig.
func (in *ElasticsearchConfig) DeepCopy() *ElasticsearchConfig {
if in == nil {
return nil
}
out := new(ElasticsearchConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedConfig) DeepCopyInto(out *EmbeddedConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedConfig.
func (in *EmbeddedConfig) DeepCopy() *EmbeddedConfig {
if in == nil {
return nil
}
out := new(EmbeddedConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Field) DeepCopyInto(out *Field) { func (in *Field) DeepCopyInto(out *Field) {
*out = *in *out = *in
@ -1374,7 +1637,6 @@ func (in *GlobalRoleBinding) DeepCopyInto(out *GlobalRoleBinding) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Subject = in.Subject
return return
} }
@ -1615,6 +1877,22 @@ func (in *GroupMemberList) DeepCopyObject() runtime.Object {
} }
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImportedConfig) DeepCopyInto(out *ImportedConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportedConfig.
func (in *ImportedConfig) DeepCopy() *ImportedConfig {
if in == nil {
return nil
}
out := new(ImportedConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K8sServerConfig) DeepCopyInto(out *K8sServerConfig) { func (in *K8sServerConfig) DeepCopyInto(out *K8sServerConfig) {
*out = *in *out = *in
@ -1636,6 +1914,40 @@ func (in *K8sServerConfig) DeepCopy() *K8sServerConfig {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaConfig) DeepCopyInto(out *KafkaConfig) {
*out = *in
if in.Zookeeper != nil {
in, out := &in.Zookeeper, &out.Zookeeper
if *in == nil {
*out = nil
} else {
*out = new(Zookeeper)
**out = **in
}
}
if in.Broker != nil {
in, out := &in.Broker, &out.Broker
if *in == nil {
*out = nil
} else {
*out = new(BrokerList)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConfig.
func (in *KafkaConfig) DeepCopy() *KafkaConfig {
if in == nil {
return nil
}
out := new(KafkaConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeAPIService) DeepCopyInto(out *KubeAPIService) { func (in *KubeAPIService) DeepCopyInto(out *KubeAPIService) {
*out = *in *out = *in
@ -1727,6 +2039,87 @@ func (in *ListOpts) DeepCopy() *ListOpts {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListenConfig) DeepCopyInto(out *ListenConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Domains != nil {
in, out := &in.Domains, &out.Domains
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TOS != nil {
in, out := &in.TOS, &out.TOS
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KnownIPs != nil {
in, out := &in.KnownIPs, &out.KnownIPs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SubjectAlternativeNames != nil {
in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenConfig.
func (in *ListenConfig) DeepCopy() *ListenConfig {
if in == nil {
return nil
}
out := new(ListenConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ListenConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListenConfigList) DeepCopyInto(out *ListenConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ListenConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenConfigList.
func (in *ListenConfigList) DeepCopy() *ListenConfigList {
if in == nil {
return nil
}
out := new(ListenConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ListenConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalCredential) DeepCopyInto(out *LocalCredential) { func (in *LocalCredential) DeepCopyInto(out *LocalCredential) {
*out = *in *out = *in
@ -1743,6 +2136,102 @@ func (in *LocalCredential) DeepCopy() *LocalCredential {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingCommonSpec) DeepCopyInto(out *LoggingCommonSpec) {
*out = *in
if in.OutputTags != nil {
in, out := &in.OutputTags, &out.OutputTags
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ElasticsearchConfig != nil {
in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig
if *in == nil {
*out = nil
} else {
*out = new(ElasticsearchConfig)
**out = **in
}
}
if in.SplunkConfig != nil {
in, out := &in.SplunkConfig, &out.SplunkConfig
if *in == nil {
*out = nil
} else {
*out = new(SplunkConfig)
**out = **in
}
}
if in.KafkaConfig != nil {
in, out := &in.KafkaConfig, &out.KafkaConfig
if *in == nil {
*out = nil
} else {
*out = new(KafkaConfig)
(*in).DeepCopyInto(*out)
}
}
if in.SyslogConfig != nil {
in, out := &in.SyslogConfig, &out.SyslogConfig
if *in == nil {
*out = nil
} else {
*out = new(SyslogConfig)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingCommonSpec.
func (in *LoggingCommonSpec) DeepCopy() *LoggingCommonSpec {
if in == nil {
return nil
}
out := new(LoggingCommonSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingCondition) DeepCopyInto(out *LoggingCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingCondition.
func (in *LoggingCondition) DeepCopy() *LoggingCondition {
if in == nil {
return nil
}
out := new(LoggingCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingStatus) DeepCopyInto(out *LoggingStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]LoggingCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingStatus.
func (in *LoggingStatus) DeepCopy() *LoggingStatus {
if in == nil {
return nil
}
out := new(LoggingStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoginInput) DeepCopyInto(out *LoginInput) { func (in *LoginInput) DeepCopyInto(out *LoginInput) {
*out = *in *out = *in
@ -2039,6 +2528,15 @@ func (in *MachineList) DeepCopyObject() runtime.Object {
func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { func (in *MachineSpec) DeepCopyInto(out *MachineSpec) {
*out = *in *out = *in
in.NodeSpec.DeepCopyInto(&out.NodeSpec) in.NodeSpec.DeepCopyInto(&out.NodeSpec)
if in.CustomConfig != nil {
in, out := &in.CustomConfig, &out.CustomConfig
if *in == nil {
*out = nil
} else {
*out = new(CustomConfig)
**out = **in
}
}
if in.Role != nil { if in.Role != nil {
in, out := &in.Role, &out.Role in, out := &in.Role, &out.Role
*out = make([]string, len(*in)) *out = make([]string, len(*in))
@ -2112,6 +2610,13 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
(*out)[key] = val (*out)[key] = val
} }
} }
if in.NodeTaints != nil {
in, out := &in.NodeTaints, &out.NodeTaints
*out = make([]v1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return return
} }
@ -2457,6 +2962,22 @@ func (in *PrincipalList) DeepCopyObject() runtime.Object {
} }
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateRegistry) DeepCopyInto(out *PrivateRegistry) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateRegistry.
func (in *PrivateRegistry) DeepCopy() *PrivateRegistry {
if in == nil {
return nil
}
out := new(PrivateRegistry)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Project) DeepCopyInto(out *Project) { func (in *Project) DeepCopyInto(out *Project) {
*out = *in *out = *in
@ -2537,13 +3058,93 @@ func (in *ProjectList) DeepCopyObject() runtime.Object {
} }
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLogging) DeepCopyInto(out *ProjectLogging) {
*out = *in
out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLogging.
func (in *ProjectLogging) DeepCopy() *ProjectLogging {
if in == nil {
return nil
}
out := new(ProjectLogging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectLogging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLoggingList) DeepCopyInto(out *ProjectLoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ProjectLogging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLoggingList.
func (in *ProjectLoggingList) DeepCopy() *ProjectLoggingList {
if in == nil {
return nil
}
out := new(ProjectLoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectLoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLoggingSpec) DeepCopyInto(out *ProjectLoggingSpec) {
*out = *in
in.LoggingCommonSpec.DeepCopyInto(&out.LoggingCommonSpec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLoggingSpec.
func (in *ProjectLoggingSpec) DeepCopy() *ProjectLoggingSpec {
if in == nil {
return nil
}
out := new(ProjectLoggingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectRoleTemplateBinding) DeepCopyInto(out *ProjectRoleTemplateBinding) { func (in *ProjectRoleTemplateBinding) DeepCopyInto(out *ProjectRoleTemplateBinding) {
*out = *in *out = *in
out.Namespaced = in.Namespaced out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Subject = in.Subject
return return
} }
@ -2701,6 +3302,22 @@ func (in *RKEConfigServices) DeepCopy() *RKEConfigServices {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RKESystemImages) DeepCopyInto(out *RKESystemImages) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RKESystemImages.
func (in *RKESystemImages) DeepCopy() *RKESystemImages {
if in == nil {
return nil
}
out := new(RKESystemImages)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RancherKubernetesEngineConfig) DeepCopyInto(out *RancherKubernetesEngineConfig) { func (in *RancherKubernetesEngineConfig) DeepCopyInto(out *RancherKubernetesEngineConfig) {
*out = *in *out = *in
@ -2714,14 +3331,13 @@ func (in *RancherKubernetesEngineConfig) DeepCopyInto(out *RancherKubernetesEngi
in.Services.DeepCopyInto(&out.Services) in.Services.DeepCopyInto(&out.Services)
in.Network.DeepCopyInto(&out.Network) in.Network.DeepCopyInto(&out.Network)
in.Authentication.DeepCopyInto(&out.Authentication) in.Authentication.DeepCopyInto(&out.Authentication)
if in.SystemImages != nil { out.SystemImages = in.SystemImages
in, out := &in.SystemImages, &out.SystemImages
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
in.Authorization.DeepCopyInto(&out.Authorization) in.Authorization.DeepCopyInto(&out.Authorization)
if in.PrivateRegistries != nil {
in, out := &in.PrivateRegistries, &out.PrivateRegistries
*out = make([]PrivateRegistry, len(*in))
copy(*out, *in)
}
return return
} }
@ -2857,6 +3473,83 @@ func (in *SetPasswordInput) DeepCopy() *SetPasswordInput {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Setting) DeepCopyInto(out *Setting) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Setting.
func (in *Setting) DeepCopy() *Setting {
if in == nil {
return nil
}
out := new(Setting)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Setting) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SettingList) DeepCopyInto(out *SettingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Setting, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingList.
func (in *SettingList) DeepCopy() *SettingList {
if in == nil {
return nil
}
out := new(SettingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SettingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SplunkConfig) DeepCopyInto(out *SplunkConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfig.
func (in *SplunkConfig) DeepCopy() *SplunkConfig {
if in == nil {
return nil
}
out := new(SplunkConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Stack) DeepCopyInto(out *Stack) {
*out = *in
@ -2984,6 +3677,22 @@ func (in *StackStatus) DeepCopy() *StackStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyslogConfig) DeepCopyInto(out *SyslogConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogConfig.
func (in *SyslogConfig) DeepCopy() *SyslogConfig {
if in == nil {
return nil
}
out := new(SyslogConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Template) DeepCopyInto(out *Template) {
*out = *in
@ -3376,3 +4085,19 @@ func (in *Values) DeepCopy() *Values {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Zookeeper) DeepCopyInto(out *Zookeeper) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zookeeper.
func (in *Zookeeper) DeepCopy() *Zookeeper {
if in == nil {
return nil
}
out := new(Zookeeper)
in.DeepCopyInto(out)
return out
}

View File

@ -38,6 +38,10 @@ type Interface interface {
DynamicSchemasGetter
StacksGetter
PreferencesGetter
ClusterLoggingsGetter
ProjectLoggingsGetter
ListenConfigsGetter
SettingsGetter
}
type Client struct {
@ -69,6 +73,10 @@ type Client struct {
dynamicSchemaControllers map[string]DynamicSchemaController
stackControllers map[string]StackController
preferenceControllers map[string]PreferenceController
clusterLoggingControllers map[string]ClusterLoggingController
projectLoggingControllers map[string]ProjectLoggingController
listenConfigControllers map[string]ListenConfigController
settingControllers map[string]SettingController
}
func NewForConfig(config rest.Config) (Interface, error) {
@ -109,6 +117,10 @@ func NewForConfig(config rest.Config) (Interface, error) {
dynamicSchemaControllers: map[string]DynamicSchemaController{},
stackControllers: map[string]StackController{},
preferenceControllers: map[string]PreferenceController{},
clusterLoggingControllers: map[string]ClusterLoggingController{},
projectLoggingControllers: map[string]ProjectLoggingController{},
listenConfigControllers: map[string]ListenConfigController{},
settingControllers: map[string]SettingController{},
}, nil
}
@ -435,3 +447,55 @@ func (c *Client) Preferences(namespace string) PreferenceInterface {
objectClient: objectClient,
}
}
type ClusterLoggingsGetter interface {
ClusterLoggings(namespace string) ClusterLoggingInterface
}
func (c *Client) ClusterLoggings(namespace string) ClusterLoggingInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &ClusterLoggingResource, ClusterLoggingGroupVersionKind, clusterLoggingFactory{})
return &clusterLoggingClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
type ProjectLoggingsGetter interface {
ProjectLoggings(namespace string) ProjectLoggingInterface
}
func (c *Client) ProjectLoggings(namespace string) ProjectLoggingInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &ProjectLoggingResource, ProjectLoggingGroupVersionKind, projectLoggingFactory{})
return &projectLoggingClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
type ListenConfigsGetter interface {
ListenConfigs(namespace string) ListenConfigInterface
}
func (c *Client) ListenConfigs(namespace string) ListenConfigInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &ListenConfigResource, ListenConfigGroupVersionKind, listenConfigFactory{})
return &listenConfigClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
type SettingsGetter interface {
Settings(namespace string) SettingInterface
}
func (c *Client) Settings(namespace string) SettingInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &SettingResource, SettingGroupVersionKind, settingFactory{})
return &settingClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
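As a rough usage sketch of the new getters (assumptions: the import path github.com/rancher/types/apis/management.cattle.io/v3 and a kubeconfig on disk; nothing below is taken from this repository), a caller reaches Settings, ListenConfigs and the other new clients through the same NewForConfig entry point shown above:

package main

import (
	"fmt"

	managementv3 "github.com/rancher/types/apis/management.cattle.io/v3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig exists at this path.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig takes rest.Config by value, as declared above.
	client, err := managementv3.NewForConfig(*cfg)
	if err != nil {
		panic(err)
	}

	// Setting is registered as a non-namespaced resource, so pass "".
	settings, err := client.Settings("").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d settings\n", len(settings.Items))
}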

View File

@ -0,0 +1,251 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
ListenConfigGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "ListenConfig",
}
ListenConfigResource = metav1.APIResource{
Name: "listenconfigs",
SingularName: "listenconfig",
Namespaced: false,
Kind: ListenConfigGroupVersionKind.Kind,
}
)
type ListenConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ListenConfig
}
type ListenConfigHandlerFunc func(key string, obj *ListenConfig) error
type ListenConfigLister interface {
List(namespace string, selector labels.Selector) (ret []*ListenConfig, err error)
Get(namespace, name string) (*ListenConfig, error)
}
type ListenConfigController interface {
Informer() cache.SharedIndexInformer
Lister() ListenConfigLister
AddHandler(name string, handler ListenConfigHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler ListenConfigHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type ListenConfigInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*ListenConfig) (*ListenConfig, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ListenConfig, error)
Get(name string, opts metav1.GetOptions) (*ListenConfig, error)
Update(*ListenConfig) (*ListenConfig, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*ListenConfigList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() ListenConfigController
AddHandler(name string, sync ListenConfigHandlerFunc)
AddLifecycle(name string, lifecycle ListenConfigLifecycle)
AddClusterScopedHandler(name, clusterName string, sync ListenConfigHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle ListenConfigLifecycle)
}
type listenConfigLister struct {
controller *listenConfigController
}
func (l *listenConfigLister) List(namespace string, selector labels.Selector) (ret []*ListenConfig, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*ListenConfig))
})
return
}
func (l *listenConfigLister) Get(namespace, name string) (*ListenConfig, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: ListenConfigGroupVersionKind.Group,
Resource: "listenConfig",
}, name)
}
return obj.(*ListenConfig), nil
}
type listenConfigController struct {
controller.GenericController
}
func (c *listenConfigController) Lister() ListenConfigLister {
return &listenConfigLister{
controller: c,
}
}
func (c *listenConfigController) AddHandler(name string, handler ListenConfigHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*ListenConfig))
})
}
func (c *listenConfigController) AddClusterScopedHandler(name, cluster string, handler ListenConfigHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*ListenConfig))
})
}
type listenConfigFactory struct {
}
func (c listenConfigFactory) Object() runtime.Object {
return &ListenConfig{}
}
func (c listenConfigFactory) List() runtime.Object {
return &ListenConfigList{}
}
func (s *listenConfigClient) Controller() ListenConfigController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.listenConfigControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(ListenConfigGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &listenConfigController{
GenericController: genericController,
}
s.client.listenConfigControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type listenConfigClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller ListenConfigController
}
func (s *listenConfigClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *listenConfigClient) Create(o *ListenConfig) (*ListenConfig, error) {
obj, err := s.objectClient.Create(o)
return obj.(*ListenConfig), err
}
func (s *listenConfigClient) Get(name string, opts metav1.GetOptions) (*ListenConfig, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*ListenConfig), err
}
func (s *listenConfigClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ListenConfig, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*ListenConfig), err
}
func (s *listenConfigClient) Update(o *ListenConfig) (*ListenConfig, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*ListenConfig), err
}
func (s *listenConfigClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *listenConfigClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *listenConfigClient) List(opts metav1.ListOptions) (*ListenConfigList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*ListenConfigList), err
}
func (s *listenConfigClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched ListenConfig.
func (s *listenConfigClient) Patch(o *ListenConfig, data []byte, subresources ...string) (*ListenConfig, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*ListenConfig), err
}
func (s *listenConfigClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *listenConfigClient) AddHandler(name string, sync ListenConfigHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *listenConfigClient) AddLifecycle(name string, lifecycle ListenConfigLifecycle) {
sync := NewListenConfigLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *listenConfigClient) AddClusterScopedHandler(name, clusterName string, sync ListenConfigHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *listenConfigClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle ListenConfigLifecycle) {
sync := NewListenConfigLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}
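A hypothetical consumer of this interface would register a handler by name; the sketch below assumes only the import path and that ListenConfig embeds ObjectMeta (so obj.Name resolves), and mirrors the ListenConfigHandlerFunc signature defined above.

package example

import (
	"fmt"

	managementv3 "github.com/rancher/types/apis/management.cattle.io/v3"
)

// WatchListenConfigs registers a named handler; the generated controller calls
// it with a nil object when the key has been deleted from the store.
func WatchListenConfigs(client managementv3.ListenConfigInterface) {
	client.AddHandler("log-listen-configs", func(key string, obj *managementv3.ListenConfig) error {
		if obj == nil {
			fmt.Printf("listenconfig %s removed\n", key)
			return nil
		}
		fmt.Printf("listenconfig %s changed\n", obj.Name)
		return nil
	})
}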

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type ListenConfigLifecycle interface {
Create(obj *ListenConfig) (*ListenConfig, error)
Remove(obj *ListenConfig) (*ListenConfig, error)
Updated(obj *ListenConfig) (*ListenConfig, error)
}
type listenConfigLifecycleAdapter struct {
lifecycle ListenConfigLifecycle
}
func (w *listenConfigLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*ListenConfig))
if o == nil {
return nil, err
}
return o, err
}
func (w *listenConfigLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*ListenConfig))
if o == nil {
return nil, err
}
return o, err
}
func (w *listenConfigLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*ListenConfig))
if o == nil {
return nil, err
}
return o, err
}
func NewListenConfigLifecycleAdapter(name string, clusterScoped bool, client ListenConfigInterface, l ListenConfigLifecycle) ListenConfigHandlerFunc {
adapter := &listenConfigLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *ListenConfig) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}
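For completeness, a hedged sketch of what a ListenConfigLifecycle implementation and its registration could look like; the type name sslCertSync and the import alias are invented for illustration, while the method signatures follow the interface above.

package example

import (
	managementv3 "github.com/rancher/types/apis/management.cattle.io/v3"
)

// sslCertSync is an invented ListenConfigLifecycle implementation; each method
// returns the (possibly mutated) object, matching the interface above.
type sslCertSync struct{}

func (s *sslCertSync) Create(obj *managementv3.ListenConfig) (*managementv3.ListenConfig, error) {
	// e.g. provision certificates for a newly created listener config
	return obj, nil
}

func (s *sslCertSync) Updated(obj *managementv3.ListenConfig) (*managementv3.ListenConfig, error) {
	return obj, nil
}

func (s *sslCertSync) Remove(obj *managementv3.ListenConfig) (*managementv3.ListenConfig, error) {
	// Remove is reached through the adapter's Finalize hook, so clean-up
	// belongs here before the finalizer is dropped.
	return obj, nil
}

// Register wires the lifecycle in; AddLifecycle wraps it with
// NewListenConfigLifecycleAdapter behind the scenes, as shown above.
func Register(client managementv3.ListenConfigInterface) {
	client.AddLifecycle("ssl-cert-sync", &sslCertSync{})
}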

View File

@ -0,0 +1,252 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
ProjectLoggingGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "ProjectLogging",
}
ProjectLoggingResource = metav1.APIResource{
Name: "projectloggings",
SingularName: "projectlogging",
Namespaced: true,
Kind: ProjectLoggingGroupVersionKind.Kind,
}
)
type ProjectLoggingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ProjectLogging
}
type ProjectLoggingHandlerFunc func(key string, obj *ProjectLogging) error
type ProjectLoggingLister interface {
List(namespace string, selector labels.Selector) (ret []*ProjectLogging, err error)
Get(namespace, name string) (*ProjectLogging, error)
}
type ProjectLoggingController interface {
Informer() cache.SharedIndexInformer
Lister() ProjectLoggingLister
AddHandler(name string, handler ProjectLoggingHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler ProjectLoggingHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type ProjectLoggingInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*ProjectLogging) (*ProjectLogging, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ProjectLogging, error)
Get(name string, opts metav1.GetOptions) (*ProjectLogging, error)
Update(*ProjectLogging) (*ProjectLogging, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*ProjectLoggingList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() ProjectLoggingController
AddHandler(name string, sync ProjectLoggingHandlerFunc)
AddLifecycle(name string, lifecycle ProjectLoggingLifecycle)
AddClusterScopedHandler(name, clusterName string, sync ProjectLoggingHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle ProjectLoggingLifecycle)
}
type projectLoggingLister struct {
controller *projectLoggingController
}
func (l *projectLoggingLister) List(namespace string, selector labels.Selector) (ret []*ProjectLogging, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*ProjectLogging))
})
return
}
func (l *projectLoggingLister) Get(namespace, name string) (*ProjectLogging, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: ProjectLoggingGroupVersionKind.Group,
Resource: "projectLogging",
}, name)
}
return obj.(*ProjectLogging), nil
}
type projectLoggingController struct {
controller.GenericController
}
func (c *projectLoggingController) Lister() ProjectLoggingLister {
return &projectLoggingLister{
controller: c,
}
}
func (c *projectLoggingController) AddHandler(name string, handler ProjectLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*ProjectLogging))
})
}
func (c *projectLoggingController) AddClusterScopedHandler(name, cluster string, handler ProjectLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*ProjectLogging))
})
}
type projectLoggingFactory struct {
}
func (c projectLoggingFactory) Object() runtime.Object {
return &ProjectLogging{}
}
func (c projectLoggingFactory) List() runtime.Object {
return &ProjectLoggingList{}
}
func (s *projectLoggingClient) Controller() ProjectLoggingController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.projectLoggingControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(ProjectLoggingGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &projectLoggingController{
GenericController: genericController,
}
s.client.projectLoggingControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type projectLoggingClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller ProjectLoggingController
}
func (s *projectLoggingClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *projectLoggingClient) Create(o *ProjectLogging) (*ProjectLogging, error) {
obj, err := s.objectClient.Create(o)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Get(name string, opts metav1.GetOptions) (*ProjectLogging, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ProjectLogging, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Update(o *ProjectLogging) (*ProjectLogging, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *projectLoggingClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *projectLoggingClient) List(opts metav1.ListOptions) (*ProjectLoggingList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*ProjectLoggingList), err
}
func (s *projectLoggingClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched ProjectLogging.
func (s *projectLoggingClient) Patch(o *ProjectLogging, data []byte, subresources ...string) (*ProjectLogging, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *projectLoggingClient) AddHandler(name string, sync ProjectLoggingHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *projectLoggingClient) AddLifecycle(name string, lifecycle ProjectLoggingLifecycle) {
sync := NewProjectLoggingLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *projectLoggingClient) AddClusterScopedHandler(name, clusterName string, sync ProjectLoggingHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *projectLoggingClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle ProjectLoggingLifecycle) {
sync := NewProjectLoggingLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type ProjectLoggingLifecycle interface {
Create(obj *ProjectLogging) (*ProjectLogging, error)
Remove(obj *ProjectLogging) (*ProjectLogging, error)
Updated(obj *ProjectLogging) (*ProjectLogging, error)
}
type projectLoggingLifecycleAdapter struct {
lifecycle ProjectLoggingLifecycle
}
func (w *projectLoggingLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *projectLoggingLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *projectLoggingLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func NewProjectLoggingLifecycleAdapter(name string, clusterScoped bool, client ProjectLoggingInterface, l ProjectLoggingLifecycle) ProjectLoggingHandlerFunc {
adapter := &projectLoggingLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *ProjectLogging) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}

View File

@ -81,6 +81,14 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&StackList{},
&Preference{},
&PreferenceList{},
&ClusterLogging{},
&ClusterLoggingList{},
&ProjectLogging{},
&ProjectLoggingList{},
&ListenConfig{},
&ListenConfigList{},
&Setting{},
&SettingList{},
)
return nil
}

View File

@ -0,0 +1,251 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
SettingGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "Setting",
}
SettingResource = metav1.APIResource{
Name: "settings",
SingularName: "setting",
Namespaced: false,
Kind: SettingGroupVersionKind.Kind,
}
)
type SettingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Setting
}
type SettingHandlerFunc func(key string, obj *Setting) error
type SettingLister interface {
List(namespace string, selector labels.Selector) (ret []*Setting, err error)
Get(namespace, name string) (*Setting, error)
}
type SettingController interface {
Informer() cache.SharedIndexInformer
Lister() SettingLister
AddHandler(name string, handler SettingHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler SettingHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type SettingInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*Setting) (*Setting, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*Setting, error)
Get(name string, opts metav1.GetOptions) (*Setting, error)
Update(*Setting) (*Setting, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*SettingList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() SettingController
AddHandler(name string, sync SettingHandlerFunc)
AddLifecycle(name string, lifecycle SettingLifecycle)
AddClusterScopedHandler(name, clusterName string, sync SettingHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle SettingLifecycle)
}
type settingLister struct {
controller *settingController
}
func (l *settingLister) List(namespace string, selector labels.Selector) (ret []*Setting, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*Setting))
})
return
}
func (l *settingLister) Get(namespace, name string) (*Setting, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: SettingGroupVersionKind.Group,
Resource: "setting",
}, name)
}
return obj.(*Setting), nil
}
type settingController struct {
controller.GenericController
}
func (c *settingController) Lister() SettingLister {
return &settingLister{
controller: c,
}
}
func (c *settingController) AddHandler(name string, handler SettingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*Setting))
})
}
func (c *settingController) AddClusterScopedHandler(name, cluster string, handler SettingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*Setting))
})
}
type settingFactory struct {
}
func (c settingFactory) Object() runtime.Object {
return &Setting{}
}
func (c settingFactory) List() runtime.Object {
return &SettingList{}
}
func (s *settingClient) Controller() SettingController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.settingControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(SettingGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &settingController{
GenericController: genericController,
}
s.client.settingControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type settingClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller SettingController
}
func (s *settingClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *settingClient) Create(o *Setting) (*Setting, error) {
obj, err := s.objectClient.Create(o)
return obj.(*Setting), err
}
func (s *settingClient) Get(name string, opts metav1.GetOptions) (*Setting, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*Setting), err
}
func (s *settingClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*Setting, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*Setting), err
}
func (s *settingClient) Update(o *Setting) (*Setting, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*Setting), err
}
func (s *settingClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *settingClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *settingClient) List(opts metav1.ListOptions) (*SettingList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*SettingList), err
}
func (s *settingClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched Setting.
func (s *settingClient) Patch(o *Setting, data []byte, subresources ...string) (*Setting, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*Setting), err
}
func (s *settingClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *settingClient) AddHandler(name string, sync SettingHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *settingClient) AddLifecycle(name string, lifecycle SettingLifecycle) {
sync := NewSettingLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *settingClient) AddClusterScopedHandler(name, clusterName string, sync SettingHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *settingClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle SettingLifecycle) {
sync := NewSettingLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type SettingLifecycle interface {
Create(obj *Setting) (*Setting, error)
Remove(obj *Setting) (*Setting, error)
Updated(obj *Setting) (*Setting, error)
}
type settingLifecycleAdapter struct {
lifecycle SettingLifecycle
}
func (w *settingLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*Setting))
if o == nil {
return nil, err
}
return o, err
}
func (w *settingLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*Setting))
if o == nil {
return nil, err
}
return o, err
}
func (w *settingLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*Setting))
if o == nil {
return nil, err
}
return o, err
}
func NewSettingLifecycleAdapter(name string, clusterScoped bool, client SettingInterface, l SettingLifecycle) SettingHandlerFunc {
adapter := &settingLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *Setting) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}

View File

@ -1,7 +1,8 @@
# package
github.com/rancher/types
k8s.io/kubernetes v1.8.3 transitive=true,staging=true
bitbucket.org/ww/goautoneg a547fc61f48d567d5b4ec6f8aee5573d8efce11d https://github.com/rancher/goautoneg.git
github.com/rancher/norman 76d825608521a24007cfdb110d9b88d45c50d9c5 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
github.com/rancher/norman 0b548c23c75c159d4351385cec07a35281c8f5f7

22
vendor/github.com/ugorji/go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2012-2015 Ugorji Nwoke.
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

20
vendor/github.com/ugorji/go/README.md generated vendored Normal file
View File

@ -0,0 +1,20 @@
# go/codec
This repository contains the `go-codec` library,
a High Performance and Feature-Rich Idiomatic encode/decode and rpc library for
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
For more information:
- [see the codec/Readme for quick usage information](https://github.com/ugorji/go/tree/master/codec#readme)
- [view the API on godoc](http://godoc.org/github.com/ugorji/go/codec)
- [read the detailed usage/how-to primer](http://ugorji.net/blog/go-codec-primer)
Install using:
go get github.com/ugorji/go/codec
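A minimal, self-contained sketch of typical go-codec usage (not taken from this vendored tree): encode and decode a value through a JsonHandle, the same pattern applying to the other handles.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var (
		jh codec.JsonHandle // any Handle (BincHandle, MsgpackHandle, ...) works the same way
		b  []byte
	)

	in := map[string]int{"replicas": 3}

	// Encode into a byte slice.
	if err := codec.NewEncoderBytes(&b, &jh).Encode(in); err != nil {
		panic(err)
	}

	// Decode back into a generic value.
	var out map[string]interface{}
	if err := codec.NewDecoderBytes(b, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(string(b), out)
}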

View File

@ -1,10 +1,9 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
/* /*
Package codec provides a High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for
High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library binc, msgpack, cbor, json
for binc, msgpack, cbor, json.
Supported Serialization formats are: Supported Serialization formats are:
@ -33,14 +32,15 @@ Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API - Simple but extremely powerful and feature-rich API
- Support for go1.4 and above, while selectively using newer APIs for later releases - Support for go1.4 and above, while selectively using newer APIs for later releases
- Excellent code coverage ( > 90% ) - Good code coverage ( > 70% )
- Very High Performance. - Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Careful selected use of 'unsafe' for targeted performance gains. - Careful selected use of 'unsafe' for targeted performance gains.
100% mode exists where 'unsafe' is not used at all. 100% mode exists where 'unsafe' is not used at all.
- Lock-free (sans mutex) concurrency for scaling to 100's of cores - Lock-free (sans mutex) concurrency for scaling to 100's of cores
- Coerce types where appropriate - Multiple conversions:
e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases: - Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags - Standard field renaming via tags
@ -49,16 +49,10 @@ Rich Feature Set includes:
(struct, slice, map, primitives, pointers, interface{}, etc) (struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types - Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Support IsZero() bool to determine if a value is a zero value.
Analogous to time.Time.IsZero() bool.
- Decoding without a schema (into a interface{}). - Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{} when decoding an encoded list or map into a nil interface{}
- Mapping a non-interface type to an interface, so we can decode appropriately
into any interface type with a correctly configured non-interface value.
- Encode a struct as an array, and decode struct from an array in the data stream - Encode a struct as an array, and decode struct from an array in the data stream
- Option to encode struct keys as numbers (instead of strings)
(to support structured streams with fields encoded as numeric codes)
- Comprehensive support for anonymous fields - Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices - Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance. - Code-generation for faster performance.
@ -99,27 +93,6 @@ encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can would be encoded as a string. However, with extension support, you can
encode any of these however you like. encode any of these however you like.
Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halfs.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce chances of issues happening because the
encoding and decoding sides are out of sync e.g. decoded via very specific
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
Consequently, if a type only defines one-half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
RPC RPC
RPC Client and Server Codecs are implemented, so the codecs can be used RPC Client and Server Codecs are implemented, so the codecs can be used
@ -207,63 +180,6 @@ Running Benchmarks
Please see http://github.com/ugorji/go-codec-bench . Please see http://github.com/ugorji/go-codec-bench .
Caveats
Struct fields matching the following are ignored during encoding and decoding
- struct tag value set to -
- func, complex numbers, unsafe pointers
- unexported and not embedded
- unexported and embedded and not struct kind
- unexported and embedded pointers (from go1.10)
Every other field in a struct will be encoded/decoded.
Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/ */
package codec package codec
// TODO:
// - In Go 1.10, when mid-stack inlining is enabled,
// we should use committed functions for writeXXX and readXXX calls.
// This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
// and using those (decReaderSwitch and encWriterSwitch) in all handles
// instead of encWriter and decReader.
// The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader
// will be inlined, giving a performance bump for that typical case.
// However, it will only be inlined if mid-stack inlining is enabled,
// as we call panic to raise errors, and panic currently prevents inlining.
//
// - Unexport BasicHandle.
// If godoc can now show the embedded options, then unexport it.
//
// PUNTED:
// - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer,
// and use overlay methods on *BasicHandle to call through to extHandle after initializing
// the "xh *extHandle" to point to a real slice.
//
// - Allow mapping a concrete type to an interface, for use during decoding.
//
// BEFORE EACH RELEASE:
// - Look through and fix padding for each type, to eliminate false sharing
// - critical shared objects that are read many times
// TypeInfos
// - pooled objects:
// decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
// - small objects allocated independently, that we read/use much across threads:
// codecFn, typeInfo
// - Objects allocated independently and used a lot
// Decoder, Encoder,
// xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple)
// - In all above, arrange values modified together to be close to each other.
//
// For all of these, either ensure that they occupy full cache lines,
// or ensure that the things just past the cache line boundary are hardly read/written
// e.g. JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init
//
// Occupying full cache lines means they occupy 8*N words (where N is an integer).
// Check this out by running: ./run.sh -z
// - look at those tagged ****, meaning they are not occupying full cache lines
// - look at those tagged <<<<, meaning they are larger than 32 words (something to watch)
// - Run "golint -min_confidence 0.81"

View File

@ -31,14 +31,15 @@ Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API - Simple but extremely powerful and feature-rich API
- Support for go1.4 and above, while selectively using newer APIs for later releases - Support for go1.4 and above, while selectively using newer APIs for later releases
- Excellent code coverage ( > 90% ) - Good code coverage ( > 70% )
- Very High Performance. - Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Careful selected use of 'unsafe' for targeted performance gains. - Careful selected use of 'unsafe' for targeted performance gains.
100% mode exists where 'unsafe' is not used at all. 100% mode exists where 'unsafe' is not used at all.
- Lock-free (sans mutex) concurrency for scaling to 100's of cores - Lock-free (sans mutex) concurrency for scaling to 100's of cores
- Coerce types where appropriate - Multiple conversions:
e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases: - Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags - Standard field renaming via tags
@ -47,16 +48,10 @@ Rich Feature Set includes:
(struct, slice, map, primitives, pointers, interface{}, etc) (struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types - Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Support IsZero() bool to determine if a value is a zero value.
Analogous to time.Time.IsZero() bool.
- Decoding without a schema (into a interface{}). - Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{} when decoding an encoded list or map into a nil interface{}
- Mapping a non-interface type to an interface, so we can decode appropriately
into any interface type with a correctly configured non-interface value.
- Encode a struct as an array, and decode struct from an array in the data stream - Encode a struct as an array, and decode struct from an array in the data stream
- Option to encode struct keys as numbers (instead of strings)
(to support structured streams with fields encoded as numeric codes)
- Comprehensive support for anonymous fields - Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices - Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance. - Code-generation for faster performance.
@ -96,27 +91,6 @@ encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can would be encoded as a string. However, with extension support, you can
encode any of these however you like. encode any of these however you like.
## Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halfs.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce chances of issues happening because the
encoding and decoding sides are out of sync e.g. decoded via very specific
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
Consequently, if a type only defines one-half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
## RPC ## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used RPC Client and Server Codecs are implemented, so the codecs can be used
@ -190,17 +164,3 @@ You can run the tag 'safe' to run tests or build in safe mode. e.g.
Please see http://github.com/ugorji/go-codec-bench . Please see http://github.com/ugorji/go-codec-bench .
## Caveats
Struct fields matching the following are ignored during encoding and decoding
- struct tag value set to -
- func, complex numbers, unsafe pointers
- unexported and not embedded
- unexported and embedded and not struct kind
- unexported and embedded pointers (from go1.10)
Every other field in a struct will be encoded/decoded.
Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.

View File

@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -57,31 +57,38 @@ const (
type bincEncDriver struct { type bincEncDriver struct {
e *Encoder e *Encoder
h *BincHandle
w encWriter w encWriter
m map[string]uint16 // symbols m map[string]uint16 // symbols
b [16]byte // scratch, used for encoding numbers - bigendian style b [scratchByteArrayLen]byte
s uint16 // symbols sequencer s uint16 // symbols sequencer
// c containerState
encDriverTrackContainerWriter
noBuiltInTypes
// encNoSeparator // encNoSeparator
encDriverNoopContainerWriter
}
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
if rt == timeTypId {
var bs []byte
switch x := v.(type) {
case time.Time:
bs = encodeTime(x)
case *time.Time:
bs = encodeTime(*x)
default:
e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
}
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
} }
func (e *bincEncDriver) EncodeNil() { func (e *bincEncDriver) EncodeNil() {
e.w.writen1(bincVdSpecial<<4 | bincSpNil) e.w.writen1(bincVdSpecial<<4 | bincSpNil)
} }
func (e *bincEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
} else {
bs := bincEncodeTime(t)
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
}
func (e *bincEncDriver) EncodeBool(b bool) { func (e *bincEncDriver) EncodeBool(b bool) {
if b { if b {
e.w.writen1(bincVdSpecial<<4 | bincSpTrue) e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
@ -191,19 +198,13 @@ func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
func (e *bincEncDriver) WriteArrayStart(length int) { func (e *bincEncDriver) WriteArrayStart(length int) {
e.encLen(bincVdArray<<4, uint64(length)) e.encLen(bincVdArray<<4, uint64(length))
e.c = containerArrayStart
} }
func (e *bincEncDriver) WriteMapStart(length int) { func (e *bincEncDriver) WriteMapStart(length int) {
e.encLen(bincVdMap<<4, uint64(length)) e.encLen(bincVdMap<<4, uint64(length))
e.c = containerMapStart
} }
func (e *bincEncDriver) EncodeString(c charEncoding, v string) { func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) {
e.EncodeSymbol(v)
return
}
l := uint64(len(v)) l := uint64(len(v))
e.encBytesLen(c, l) e.encBytesLen(c, l)
if l > 0 { if l > 0 {
@ -213,7 +214,7 @@ func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
func (e *bincEncDriver) EncodeSymbol(v string) { func (e *bincEncDriver) EncodeSymbol(v string) {
// if WriteSymbolsNoRefs { // if WriteSymbolsNoRefs {
// e.encodeString(cUTF8, v) // e.encodeString(c_UTF8, v)
// return // return
// } // }
@ -223,10 +224,10 @@ func (e *bincEncDriver) EncodeSymbol(v string) {
l := len(v) l := len(v)
if l == 0 { if l == 0 {
e.encBytesLen(cUTF8, 0) e.encBytesLen(c_UTF8, 0)
return return
} else if l == 1 { } else if l == 1 {
e.encBytesLen(cUTF8, 1) e.encBytesLen(c_UTF8, 1)
e.w.writen1(v[0]) e.w.writen1(v[0])
return return
} }
@ -276,10 +277,6 @@ func (e *bincEncDriver) EncodeSymbol(v string) {
} }
func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if v == nil {
e.EncodeNil()
return
}
l := uint64(len(v)) l := uint64(len(v))
e.encBytesLen(c, l) e.encBytesLen(c, l)
if l > 0 { if l > 0 {
@ -289,7 +286,7 @@ func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
//TODO: support bincUnicodeOther (for now, just use string or bytearray) //TODO: support bincUnicodeOther (for now, just use string or bytearray)
if c == cRAW { if c == c_RAW {
e.encLen(bincVdByteArray<<4, length) e.encLen(bincVdByteArray<<4, length)
} else { } else {
e.encLen(bincVdString<<4, length) e.encLen(bincVdString<<4, length)
@ -328,9 +325,6 @@ type bincDecSymbol struct {
} }
type bincDecDriver struct { type bincDecDriver struct {
decDriverNoopContainerReader
noBuiltInTypes
d *Decoder d *Decoder
h *BincHandle h *BincHandle
r decReader r decReader
@ -339,15 +333,14 @@ type bincDecDriver struct {
bd byte bd byte
vd byte vd byte
vs byte vs byte
_ [3]byte // padding // noStreamingCodec
// decNoSeparator
b [scratchByteArrayLen]byte
// linear searching on this slice is ok, // linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream. // because we typically expect < 32 symbols in each stream.
s []bincDecSymbol s []bincDecSymbol
decDriverNoopContainerReader
// noStreamingCodec
// decNoSeparator
b [8 * 8]byte // scratch
} }
func (d *bincDecDriver) readNextBd() { func (d *bincDecDriver) readNextBd() {
@ -378,10 +371,9 @@ func (d *bincDecDriver) ContainerType() (vt valueType) {
return valueTypeArray return valueTypeArray
} else if d.vd == bincVdMap { } else if d.vd == bincVdMap {
return valueTypeMap return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -396,24 +388,27 @@ func (d *bincDecDriver) TryDecodeAsNil() bool {
return false return false
} }
func (d *bincDecDriver) DecodeTime() (t time.Time) { func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
if d.bd == bincVdSpecial<<4|bincSpNil { if rt == timeTypId {
if d.vd != bincVdTimestamp {
d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
return
}
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
var vt *time.Time = v.(*time.Time)
*vt = tt
d.bdRead = false d.bdRead = false
return
} }
if d.vd != bincVdTimestamp {
d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
return
}
t, err := bincDecodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
d.bdRead = false
return
} }
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
@ -502,33 +497,45 @@ func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
return return
} }
} else { } else {
d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
return return
} }
return return
} }
func (d *bincDecDriver) DecodeInt64() (i int64) { func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger() ui, neg := d.decCheckInteger()
i = chkOvf.SignedIntV(ui) i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg { if neg {
i = -i i = -i
} }
if chkOvf.Int(i, bitsize) {
d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize)
return
}
d.bdRead = false d.bdRead = false
return return
} }
func (d *bincDecDriver) DecodeUint64() (ui uint64) { func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger() ui, neg := d.decCheckInteger()
if neg { if neg {
d.d.errorf("Assigning negative signed value to unsigned type") d.d.errorf("Assigning negative signed value to unsigned type")
return return
} }
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("binc: overflow integer: %v", ui)
return
}
d.bdRead = false d.bdRead = false
return return
} }
func (d *bincDecDriver) DecodeFloat64() (f float64) { func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -550,7 +557,11 @@ func (d *bincDecDriver) DecodeFloat64() (f float64) {
} else if vd == bincVdFloat { } else if vd == bincVdFloat {
f = d.decFloat() f = d.decFloat()
} else { } else {
f = float64(d.DecodeInt64()) f = float64(d.DecodeInt(64))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("binc: float32 overflow: %v", f)
return
} }
d.bdRead = false d.bdRead = false
return return
@ -622,8 +633,7 @@ func (d *bincDecDriver) decLenNumber() (v uint64) {
return return
} }
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) ( func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
bs2 []byte, s string) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -631,7 +641,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
d.bdRead = false d.bdRead = false
return return
} }
var slen = -1 var slen int = -1
// var ok bool // var ok bool
switch d.vd { switch d.vd {
case bincVdString, bincVdByteArray: case bincVdString, bincVdByteArray:
@ -733,11 +743,6 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
d.bdRead = false d.bdRead = false
return nil return nil
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.vd == bincVdArray {
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
var clen int var clen int
if d.vd == bincVdString || d.vd == bincVdByteArray { if d.vd == bincVdString || d.vd == bincVdByteArray {
clen = d.decLen() clen = d.decLen()
@ -858,8 +863,8 @@ func (d *bincDecDriver) DecodeNaked() {
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false) n.l = d.DecodeBytes(nil, false)
case bincVdTimestamp: case bincVdTimestamp:
n.v = valueTypeTime n.v = valueTypeTimestamp
tt, err := bincDecodeTime(d.r.readx(int(d.vs))) tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -907,50 +912,27 @@ type BincHandle struct {
BasicHandle BasicHandle
binaryEncodingType binaryEncodingType
noElemSeparators noElemSeparators
// AsSymbols defines what should be encoded as symbols.
//
// Encoding as symbols can reduce the encoded size significantly.
//
// However, during decoding, each string to be encoded as a symbol must
// be checked to see if it has been seen before. Consequently, encoding time
// will increase if using symbols, because string comparisons have a clear cost.
//
// Values:
// - 0: default: library uses best judgement
// - 1: use symbols
// - 2: do not use symbols
AsSymbols uint8
// AsSymbols: may later on introduce more options ...
// - m: map keys
// - s: struct fields
// - n: none
// - a: all: same as m, s, ...
_ [1]uint64 // padding
} }
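As a rough usage sketch of the AsSymbols knob documented in the removed comment above (assuming the uint8 field and the numeric values 0/1/2 listed there; illustrative only, not part of the surviving API):

// Hypothetical: configure a BincHandle so strings are never encoded as symbols,
// per the value table in the comment above (2 = do not use symbols).
package main

import "github.com/ugorji/go/codec"

func newBincHandleNoSymbols() *codec.BincHandle {
	h := new(codec.BincHandle)
	h.AsSymbols = 2
	return h
}

func main() { _ = newBincHandleNoSymbols() }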
// Name returns the name of the handle: binc
func (h *BincHandle) Name() string { return "binc" }
// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) return h.SetExt(rt, tag, &setExtWrapper{b: ext})
} }
func (h *BincHandle) newEncDriver(e *Encoder) encDriver { func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
return &bincEncDriver{e: e, h: h, w: e.w} return &bincEncDriver{e: e, w: e.w}
} }
func (h *BincHandle) newDecDriver(d *Decoder) decDriver { func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
} }
func (_ *BincHandle) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) reset() { func (e *bincEncDriver) reset() {
e.w = e.e.w e.w = e.e.w
e.s = 0 e.s = 0
e.c = 0
e.m = nil e.m = nil
} }
@ -960,165 +942,5 @@ func (d *bincDecDriver) reset() {
d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
} }
// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// 2-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
func bincEncodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
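To make the descriptor byte layout above concrete, here is a small standalone sketch that unpacks the A/B/C/DDD/EE fields exactly as the comment describes them (the names come from the comment; nothing below is part of the library):

// Sketch: unpack the binc timestamp descriptor byte (layout A B C DDD EE) described above.
package main

import "fmt"

func describeBincTimestampDescriptor(bd byte) {
	hasSecs := bd&0x80 != 0         // A: secs component encoded
	hasNsecs := bd&0x40 != 0        // B: nsecs component encoded
	hasTZ := bd&0x20 != 0           // C: tz component encoded
	secsBytes := int(bd>>2)&0x7 + 1 // DDD+1 bytes for secs (meaningful only if A=1)
	nsecsBytes := int(bd)&0x3 + 1   // EE+1 bytes for nsecs (meaningful only if B=1)
	fmt.Printf("secs=%v(%dB) nsecs=%v(%dB) tz=%v\n", hasSecs, secsBytes, hasNsecs, nsecsBytes, hasTZ)
}

func main() {
	// A=1, B=1, C=0, DDD=2 (secs in 3 bytes), EE=1 (nsecs in 2 bytes).
	describeBincTimestampDescriptor(0x80 | 0x40 | 2<<2 | 1)
}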
// bincDecodeTime decodes a []byte into a time.Time.
func bincDecodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
// However, we need name here, so it can be shown when time is printed.
// Zone name is in form: UTC-08:00.
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
// i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
var _ decDriver = (*bincDecDriver)(nil) var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil) var _ encDriver = (*bincEncDriver)(nil)


@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -6,7 +6,6 @@ package codec
import ( import (
"math" "math"
"reflect" "reflect"
"time"
) )
const ( const (
@ -39,8 +38,6 @@ const (
cborBdBreak = 0xff cborBdBreak = 0xff
) )
// These define some in-stream descriptors for
// manual encoding e.g. when doing explicit indefinite-length
const ( const (
CborStreamBytes byte = 0x5f CborStreamBytes byte = 0x5f
CborStreamString = 0x7f CborStreamString = 0x7f
@ -70,7 +67,6 @@ type cborEncDriver struct {
w encWriter w encWriter
h *CborHandle h *CborHandle
x [8]byte x [8]byte
_ [3]uint64 // padding
} }
func (e *cborEncDriver) EncodeNil() { func (e *cborEncDriver) EncodeNil() {
@ -128,24 +124,6 @@ func (e *cborEncDriver) encLen(bd byte, length int) {
e.encUint(uint64(length), bd) e.encUint(uint64(length), bd)
} }
func (e *cborEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
} else if e.h.TimeRFC3339 {
e.encUint(0, cborBaseTag)
e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
} else {
e.encUint(1, cborBaseTag)
t = t.UTC().Round(time.Microsecond)
sec, nsec := t.Unix(), uint64(t.Nanosecond())
if nsec == 0 {
e.EncodeInt(sec)
} else {
e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
}
}
}
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
e.encUint(uint64(xtag), cborBaseTag) e.encUint(uint64(xtag), cborBaseTag)
if v := ext.ConvertExt(rv); v == nil { if v := ext.ConvertExt(rv); v == nil {
@ -195,65 +173,36 @@ func (e *cborEncDriver) WriteArrayEnd() {
} }
func (e *cborEncDriver) EncodeString(c charEncoding, v string) { func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encStringBytesS(cborBaseString, v) e.encLen(cborBaseString, len(v))
e.w.writestr(v)
}
func (e *cborEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
} }
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if v == nil { if c == c_RAW {
e.EncodeNil() e.encLen(cborBaseBytes, len(v))
} else if c == cRAW {
e.encStringBytesS(cborBaseBytes, stringView(v))
} else { } else {
e.encStringBytesS(cborBaseString, stringView(v)) e.encLen(cborBaseString, len(v))
}
}
func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
if e.h.IndefiniteLength {
if bb == cborBaseBytes {
e.w.writen1(cborBdIndefiniteBytes)
} else {
e.w.writen1(cborBdIndefiniteString)
}
blen := len(v) / 4
if blen == 0 {
blen = 64
} else if blen > 1024 {
blen = 1024
}
for i := 0; i < len(v); {
var v2 string
i2 := i + blen
if i2 < len(v) {
v2 = v[i:i2]
} else {
v2 = v[i:]
}
e.encLen(bb, len(v2))
e.w.writestr(v2)
i = i2
}
e.w.writen1(cborBdBreak)
} else {
e.encLen(bb, len(v))
e.w.writestr(v)
} }
e.w.writeb(v)
} }
// ---------------------- // ----------------------
type cborDecDriver struct { type cborDecDriver struct {
d *Decoder d *Decoder
h *CborHandle h *CborHandle
r decReader r decReader
// b [scratchByteArrayLen]byte b [scratchByteArrayLen]byte
br bool // bytes reader br bool // bytes reader
bdRead bool bdRead bool
bd byte bd byte
noBuiltInTypes noBuiltInTypes
// decNoSeparator // decNoSeparator
decDriverNoopContainerReader decDriverNoopContainerReader
_ [3]uint64 // padding
} }
func (d *cborDecDriver) readNextBd() { func (d *cborDecDriver) readNextBd() {
@ -282,10 +231,9 @@ func (d *cborDecDriver) ContainerType() (vt valueType) {
return valueTypeArray return valueTypeArray
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
return valueTypeMap return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -348,30 +296,46 @@ func (d *cborDecDriver) decCheckInteger() (neg bool) {
return return
} }
func (d *cborDecDriver) DecodeInt64() (i int64) { func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
neg := d.decCheckInteger() neg := d.decCheckInteger()
ui := d.decUint() ui := d.decUint()
// check if this number can be converted to an int without overflow // check if this number can be converted to an int without overflow
var overflow bool
if neg { if neg {
i = -(chkOvf.SignedIntV(ui + 1)) if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
return
}
i = -i
} else { } else {
i = chkOvf.SignedIntV(ui) if i, overflow = chkOvf.SignedInt(ui); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui)
return
}
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("cbor: overflow integer: %v", i)
return
} }
d.bdRead = false d.bdRead = false
return return
} }
func (d *cborDecDriver) DecodeUint64() (ui uint64) { func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if d.decCheckInteger() { if d.decCheckInteger() {
d.d.errorf("Assigning negative signed value to unsigned type") d.d.errorf("Assigning negative signed value to unsigned type")
return return
} }
ui = d.decUint() ui = d.decUint()
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("cbor: overflow integer: %v", ui)
return
}
d.bdRead = false d.bdRead = false
return return
} }
func (d *cborDecDriver) DecodeFloat64() (f float64) { func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -382,11 +346,15 @@ func (d *cborDecDriver) DecodeFloat64() (f float64) {
} else if bd == cborBdFloat64 { } else if bd == cborBdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else if bd >= cborBaseUint && bd < cborBaseBytes { } else if bd >= cborBaseUint && bd < cborBaseBytes {
f = float64(d.DecodeInt64()) f = float64(d.DecodeInt(64))
} else { } else {
d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
return return
} }
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("cbor: float32 overflow: %v", f)
return
}
d.bdRead = false d.bdRead = false
return return
} }
@ -440,8 +408,7 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
break break
} }
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+ d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
" got: %v, byte: %v", major, d.bd)
return nil return nil
} }
n := d.decLen() n := d.decLen()
@ -471,84 +438,29 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
return nil return nil
} }
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
d.bdRead = false
if bs == nil { if bs == nil {
if zerocopy { return d.decAppendIndefiniteBytes(nil)
return d.decAppendIndefiniteBytes(d.d.b[:0])
}
return d.decAppendIndefiniteBytes(zeroByteSlice)
} }
return d.decAppendIndefiniteBytes(bs[:0]) return d.decAppendIndefiniteBytes(bs[:0])
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
clen := d.decLen() clen := d.decLen()
d.bdRead = false d.bdRead = false
if zerocopy { if zerocopy {
if d.br { if d.br {
return d.r.readx(clen) return d.r.readx(clen)
} else if len(bs) == 0 { } else if len(bs) == 0 {
bs = d.d.b[:] bs = d.b[:]
} }
} }
return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
} }
func (d *cborDecDriver) DecodeString() (s string) { func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.d.b[:], true)) return string(d.DecodeBytes(d.b[:], true))
} }
func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true) return d.DecodeBytes(d.b[:], true)
}
func (d *cborDecDriver) DecodeTime() (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return
}
xtag := d.decUint()
d.bdRead = false
return d.decodeTime(xtag)
}
func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
switch xtag {
case 0:
var err error
if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
d.d.errorv(err)
}
case 1:
// decode an int64 or a float, and infer time.Time from there.
// for floats, round to microseconds, as that is what is guaranteed to fit well.
switch {
case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd == cborBdFloat64:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
t = time.Unix(d.DecodeInt64(), 0)
default:
d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
}
default:
d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
}
t = t.UTC().Round(time.Microsecond)
return
} }
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -591,9 +503,12 @@ func (d *cborDecDriver) DecodeNaked() {
case cborBdTrue: case cborBdTrue:
n.v = valueTypeBool n.v = valueTypeBool
n.b = true n.b = true
case cborBdFloat16, cborBdFloat32, cborBdFloat64: case cborBdFloat16, cborBdFloat32:
n.v = valueTypeFloat n.v = valueTypeFloat
n.f = d.DecodeFloat64() n.f = d.DecodeFloat(true)
case cborBdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat(false)
case cborBdIndefiniteBytes: case cborBdIndefiniteBytes:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false) n.l = d.DecodeBytes(nil, false)
@ -611,14 +526,14 @@ func (d *cborDecDriver) DecodeNaked() {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger { if d.h.SignedInteger {
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt64() n.i = d.DecodeInt(64)
} else { } else {
n.v = valueTypeUint n.v = valueTypeUint
n.u = d.DecodeUint64() n.u = d.DecodeUint(64)
} }
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt64() n.i = d.DecodeInt(64)
case d.bd >= cborBaseBytes && d.bd < cborBaseString: case d.bd >= cborBaseBytes && d.bd < cborBaseString:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false) n.l = d.DecodeBytes(nil, false)
@ -635,11 +550,6 @@ func (d *cborDecDriver) DecodeNaked() {
n.v = valueTypeExt n.v = valueTypeExt
n.u = d.decUint() n.u = d.decUint()
n.l = nil n.l = nil
if n.u == 0 || n.u == 1 {
d.bdRead = false
n.v = valueTypeTime
n.t = d.decodeTime(n.u)
}
// d.bdRead = false // d.bdRead = false
// d.d.decode(&re.Value) // handled by decode itself. // d.d.decode(&re.Value) // handled by decode itself.
// decodeFurther = true // decodeFurther = true
@ -670,8 +580,23 @@ func (d *cborDecDriver) DecodeNaked() {
// //
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. // None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones: // Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals, // - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. //
// To encode with indefinite lengths (streaming), users will use
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
//
// For example, to encode "one-byte" as an indefinite length string:
// var buf bytes.Buffer
// e := NewEncoder(&buf, new(CborHandle))
// buf.WriteByte(CborStreamString)
// e.MustEncode("one-")
// e.MustEncode("byte")
// buf.WriteByte(CborStreamBreak)
// encodedBytes := buf.Bytes()
// var vv interface{}
// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
// // Now, vv contains the same string "one-byte"
//
type CborHandle struct { type CborHandle struct {
binaryEncodingType binaryEncodingType
noElemSeparators noElemSeparators
@ -679,20 +604,10 @@ type CborHandle struct {
// IndefiniteLength=true means that we encode using indefinite length // IndefiniteLength=true means that we encode using indefinite length
IndefiniteLength bool IndefiniteLength bool
// TimeRFC3339 says to encode time.Time using RFC3339 format.
// If unset, we encode time.Time using seconds past epoch.
TimeRFC3339 bool
_ [1]uint64 // padding
} }
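A minimal configuration sketch for the two CborHandle options shown above; note that TimeRFC3339 appears on only one side of this diff, so treat the field as version-dependent:

// Sketch: enable indefinite-length encoding and RFC3339 time encoding on a CborHandle.
package main

import "github.com/ugorji/go/codec"

func newCborHandle() *codec.CborHandle {
	h := new(codec.CborHandle)
	h.IndefiniteLength = true // stream strings/bytes/containers with indefinite length
	h.TimeRFC3339 = true      // encode time.Time as an RFC3339 string (version-dependent field)
	return h
}

func main() { _ = newCborHandle() }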
// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) return h.SetExt(rt, tag, &setExtWrapper{i: ext})
} }
func (h *CborHandle) newEncDriver(e *Encoder) encDriver { func (h *CborHandle) newEncDriver(e *Encoder) encDriver {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -3,7 +3,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fast-path.go.tmpl - DO NOT EDIT. // ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
// ************************************************************
package codec package codec
@ -15,19 +18,19 @@ package codec
// This file can be omitted without causing a build failure. // This file can be omitted without causing a build failure.
// //
// The advantage of fast paths is: // The advantage of fast paths is:
// - Many calls bypass reflection altogether // - Many calls bypass reflection altogether
// //
// Currently support // Currently support
// - slice of all builtin types, // - slice of all builtin types,
// - map of all builtin types to string or interface value // - map of all builtin types to string or interface value
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) // - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations. // This should provide adequate "typical" implementations.
// //
// Note that fast track decode functions must handle values for which an address cannot be obtained. // Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example: // For example:
// m2 := map[string]int{} // m2 := map[string]int{}
// p2 := []interface{}{m2} // p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable. // // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
// //
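The m2/p2 example in the comment above is the unaddressable case the fast paths must tolerate; below is a small end-to-end sketch of that situation (CborHandle chosen arbitrarily, and the merge-into-existing-map behaviour assumes default decode options):

// Illustrative only: decode into a map that is reachable only through an
// interface{} slice element, i.e. a value the decoder cannot address.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode([]interface{}{map[string]interface{}{"a": 1}}); err != nil {
		panic(err)
	}

	m2 := map[string]interface{}{}
	p2 := []interface{}{m2}
	if err := codec.NewDecoderBytes(buf, &h).Decode(&p2); err != nil {
		panic(err)
	}
	fmt.Println(m2) // expected: the existing map is filled in place, e.g. map[a:1]
}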
import ( import (
@ -83,13 +86,17 @@ func init() {
fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v) xrt := reflect.TypeOf(v)
xptr := rt2id(xrt) xptr := rt2id(xrt)
if useLookupRecognizedTypes {
recognizedRtids = append(recognizedRtids, xptr)
recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt)))
}
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
i++ i++
return return
} }
{{/* do not register []uint8 in fast-path */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}} fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }} {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
@ -102,37 +109,21 @@ func init() {
// -- -- fast path type switch // -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
switch v := iv.(type) { switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} case []{{ .Elem }}:{{else}}
case []{{ .Elem }}: case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e){{if not .MapKey }}
case *[]{{ .Elem }}: case *[]{{ .Elem }}:{{else}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
*/}}{{end}}{{end}}{{end}}{{end}} fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
*/}}{{end}}{{end}}{{end}}
default: default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false return false
} }
return true return true
} }
{{/*
**** removing this block, as they are never called directly ****
**** removing this block, as they are never called directly ****
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
switch v := iv.(type) { switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
@ -142,7 +133,7 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
default: default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false return false
} }
return true return true
@ -157,23 +148,15 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
default: default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false return false
} }
return true return true
} }
**** removing this block, as they are never called directly ****
**** removing this block, as they are never called directly ****
*/}}
// -- -- fast path functions // -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
if f.ti.mbs { if f.ti.mbs {
fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e)
@ -182,25 +165,15 @@ func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv r
} }
} }
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
if v == nil { e.e.EncodeNil(); return }
ee, esep := e.e, e.hh.hasElemSeparators() ee, esep := e.e, e.hh.hasElemSeparators()
ee.WriteArrayStart(len(v)) ee.WriteArrayStart(len(v))
if esep {
for _, v2 := range v {
ee.WriteArrayElem()
{{ encmd .Elem "v2"}}
}
} else {
for _, v2 := range v {
{{ encmd .Elem "v2"}}
}
} {{/*
for _, v2 := range v { for _, v2 := range v {
if esep { ee.WriteArrayElem() } if esep { ee.WriteArrayElem() }
{{ encmd .Elem "v2"}} {{ encmd .Elem "v2"}}
} */}} }
ee.WriteArrayEnd() ee.WriteArrayEnd()
} }
func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
ee, esep := e.e, e.hh.hasElemSeparators() ee, esep := e.e, e.hh.hasElemSeparators()
if len(v)%2 == 1 { if len(v)%2 == 1 {
@ -208,20 +181,6 @@ func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *En
return return
} }
ee.WriteMapStart(len(v) / 2) ee.WriteMapStart(len(v) / 2)
if esep {
for j, v2 := range v {
if j%2 == 0 {
ee.WriteMapElemKey()
} else {
ee.WriteMapElemValue()
}
{{ encmd .Elem "v2"}}
}
} else {
for _, v2 := range v {
{{ encmd .Elem "v2"}}
}
} {{/*
for j, v2 := range v { for j, v2 := range v {
if esep { if esep {
if j%2 == 0 { if j%2 == 0 {
@ -231,20 +190,22 @@ func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *En
} }
} }
{{ encmd .Elem "v2"}} {{ encmd .Elem "v2"}}
} */}} }
ee.WriteMapEnd() ee.WriteMapEnd()
} }
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }} {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
} }
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
if v == nil { e.e.EncodeNil(); return }
ee, esep := e.e, e.hh.hasElemSeparators() ee, esep := e.e, e.hh.hasElemSeparators()
ee.WriteMapStart(len(v)) ee.WriteMapStart(len(v))
if e.h.Canonical { {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
{{end}}if e.h.Canonical {
{{if eq .MapKey "interface{}"}}{{/* out of band {{if eq .MapKey "interface{}"}}{{/* out of band
*/}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
e2 := NewEncoderBytes(&mksv, e.hh) e2 := NewEncoderBytes(&mksv, e.hh)
@ -260,126 +221,76 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
i++ i++
} }
sort.Sort(bytesISlice(v2)) sort.Sort(bytesISlice(v2))
if esep {
for j := range v2 {
ee.WriteMapElemKey()
e.asis(v2[j].v)
ee.WriteMapElemValue()
e.encode(v[v2[j].i])
}
} else {
for j := range v2 {
e.asis(v2[j].v)
e.encode(v[v2[j].i])
}
} {{/*
for j := range v2 { for j := range v2 {
if esep { ee.WriteMapElemKey() } if esep { ee.WriteMapElemKey() }
e.asis(v2[j].v) e.asis(v2[j].v)
if esep { ee.WriteMapElemValue() } if esep { ee.WriteMapElemValue() }
e.encode(v[v2[j].i]) e.encode(v[v2[j].i])
} */}} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i int var i int
for k, _ := range v { for k, _ := range v {
v2[i] = {{ $x }}(k) v2[i] = {{ $x }}(k)
i++ i++
} }
sort.Sort({{ sorttype .MapKey false}}(v2)) sort.Sort({{ sorttype .MapKey false}}(v2))
if esep {
for _, k2 := range v2 {
ee.WriteMapElemKey()
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
ee.WriteMapElemValue()
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
}
} else {
for _, k2 := range v2 {
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
}
} {{/*
for _, k2 := range v2 { for _, k2 := range v2 {
if esep { ee.WriteMapElemKey() } if esep { ee.WriteMapElemKey() }
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} {{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
if esep { ee.WriteMapElemValue() } if esep { ee.WriteMapElemValue() }
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
} */}} {{end}} } {{end}}
} else { } else {
if esep {
for k2, v2 := range v {
ee.WriteMapElemKey()
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
ee.WriteMapElemValue()
{{ encmd .Elem "v2"}}
}
} else {
for k2, v2 := range v {
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
{{ encmd .Elem "v2"}}
}
} {{/*
for k2, v2 := range v { for k2, v2 := range v {
if esep { ee.WriteMapElemKey() } if esep { ee.WriteMapElemKey() }
{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}} {{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
if esep { ee.WriteMapElemValue() } if esep { ee.WriteMapElemValue() }
{{ encmd .Elem "v2"}} {{ encmd .Elem "v2"}}
} */}} }
} }
ee.WriteMapEnd() ee.WriteMapEnd()
} }
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
// -- decode // -- decode
// -- -- fast path type switch // -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
var changed bool
switch v := iv.(type) { switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}: case []{{ .Elem }}:{{else}}
var v2 []{{ .Elem }} case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d){{if not .MapKey }}
if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { case *[]{{ .Elem }}: {{else}}
copy(v, v2) case *map[{{ .MapKey }}]{{ .Elem }}: {{end}}
if v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d); changed2 {
*v = v2
} }
case *[]{{ .Elem }}: {{end}}{{end}}
var v2 []{{ .Elem }}
v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
if changed {
*v = v2
}{{/*
*/}}{{end}}{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
// maps only change if nil, and in that case, there's no point copying
*/}}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
case *map[{{ .MapKey }}]{{ .Elem }}:
var v2 map[{{ .MapKey }}]{{ .Elem }}
v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
if changed {
*v = v2
}{{/*
*/}}{{end}}{{end}}{{end}}
default: default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false return false
} }
return true return true
} }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool {
switch v := iv.(type) { switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case *[]{{ .Elem }}: case *[]{{ .Elem }}: {{else}}
*v = nil {{/* case *map[{{ .MapKey }}]{{ .Elem }}: {{end}}
*/}}{{end}}{{end}}{{end}} *v = nil
{{range .Values}}{{if not .Primitive}}{{if .MapKey }} {{end}}{{end}}
case *map[{{ .MapKey }}]{{ .Elem }}:
*v = nil {{/*
*/}}{{end}}{{end}}{{end}}
default: default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false return false
} }
return true return true
@ -393,36 +304,39 @@ Slices can change if they
- are addressable (from a ptr) - are addressable (from a ptr)
- are settable (e.g. contained in an interface{}) - are settable (e.g. contained in an interface{})
*/}} */}}
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
vp := rv2i(rv).(*[]{{ .Elem }}) var vp = rv2i(rv).(*[]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d) if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d); changed {
if changed { *vp = v } *vp = v
}
} else { } else {
v := rv2i(rv).([]{{ .Elem }}) fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).([]{{ .Elem }}), !array, d)
v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d)
if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
copy(v, v2)
}
} }
} }
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
if changed { *vp = v } *vp = v
}
} }
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
dd := d.d{{/* dd := d.d
// if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
*/}}
slh, containerLenS := d.decSliceHelperStart() slh, containerLenS := d.decSliceHelperStart()
if containerLenS == 0 { if containerLenS == 0 {
if canChange { if canChange {
if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] } if v == nil {
v = []{{ .Elem }}{}
} else if len(v) != 0 {
v = v[:0]
}
changed = true changed = true
} }
slh.End() slh.End()
return v, changed return v, changed
} }
hasLen := containerLenS > 0 hasLen := containerLenS > 0
var xlen int var xlen int
if hasLen && canChange { if hasLen && canChange {
@ -441,7 +355,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange
} }
j := 0 j := 0
for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
if j == 0 && len(v) == 0 && canChange { if j == 0 && len(v) == 0 {
if hasLen { if hasLen {
xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
} else { } else {
@ -460,12 +374,10 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange
d.arrayCannotExpand(len(v), j+1) d.arrayCannotExpand(len(v), j+1)
decodeIntoBlank = true decodeIntoBlank = true
} }
} }
slh.ElemContainerState(j) slh.ElemContainerState(j)
if decodeIntoBlank { if decodeIntoBlank {
d.swallow() d.swallow()
} else if dd.TryDecodeAsNil() {
v[j] = {{ zerocmd .Elem }}
} else { } else {
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
} }
@ -482,8 +394,10 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange
slh.End() slh.End()
return v, changed return v, changed
} }
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }} {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
{{/* {{/*
Maps can change if they are Maps can change if they are
@ -493,21 +407,22 @@ Maps can change if they are
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
if rv.Kind() == reflect.Ptr { if rv.Kind() == reflect.Ptr {
vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
if changed { *vp = v } *vp = v
} else { }
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) return
} }
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
} }
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
if changed { *vp = v } *vp = v
}
} }
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool,
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
dd, esep := d.d, d.hh.hasElemSeparators(){{/* dd, esep := d.d, d.hh.hasElemSeparators()
// if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
*/}}
containerLen := dd.ReadMapStart() containerLen := dd.ReadMapStart()
if canChange && v == nil { if canChange && v == nil {
xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
@ -518,8 +433,8 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
dd.ReadMapEnd() dd.ReadMapEnd()
return v, changed return v, changed
} }
{{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
{{end}}var mk {{ .MapKey }} var mk {{ .MapKey }}
var mv {{ .Elem }} var mv {{ .Elem }}
hasLen := containerLen > 0 hasLen := containerLen > 0
for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
@ -531,14 +446,17 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
}{{ else }}mk = {{ decmd .MapKey }}{{ end }} }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
if esep { dd.ReadMapElemValue() } if esep { dd.ReadMapElemValue() }
if dd.TryDecodeAsNil() { if dd.TryDecodeAsNil() {
if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
continue continue
} }
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil { v[mk] = mv } if v != nil {
v[mk] = mv
}
} }
dd.ReadMapEnd() dd.ReadMapEnd()
return v, changed return v, changed
} }
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
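The template above branches on d.h.DeleteOnNilMapValue when a nil map value is decoded; a short sketch of what that option does from the caller's side (JsonHandle is used here purely for readability of the input):

// Sketch: with DeleteOnNilMapValue set, a nil value in the stream deletes the key
// from the destination map instead of storing the zero value.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.DeleteOnNilMapValue = true

	m := map[string]string{"a": "keep", "b": "old"}
	if err := codec.NewDecoderBytes([]byte(`{"b": null}`), &h).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m) // expected: only "a" remains
}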


@ -1,6 +1,3 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build notfastpath // +build notfastpath
package codec package codec
@ -17,11 +14,11 @@ const fastpathEnabled = false
// This tag disables fastpath during build, allowing for faster build, test execution, // This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc. // short-program runs, etc.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false }
type fastpathT struct{} type fastpathT struct{}
type fastpathE struct { type fastpathE struct {
@ -34,14 +31,5 @@ type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 } func (x fastpathA) index(rtid uintptr) int { return -1 }
func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
fn := d.cfer().get(uint8SliceTyp, true, true)
d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
return v, true
}
var fastpathAV fastpathA var fastpathAV fastpathA
var fastpathTV fastpathT var fastpathTV fastpathT
// ----
type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode


@ -43,7 +43,7 @@ if {{var "l"}} == 0 {
{{var "c"}} = true {{var "c"}} = true
}{{end}} }{{end}}
{{var "h"}}.ElemContainerState({{var "j"}}) {{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}} // {{var "dn"}} = r.TryDecodeAsNil()
{{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
{{ decLineVar $x }} {{ decLineVar $x }}
{{var "v"}} <- {{ $x }} {{var "v"}} <- {{ $x }}


@ -3,7 +3,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT. // ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec package codec
@ -28,73 +31,30 @@ const GenVersion = 8
// GenHelperEncoder is exported so that it can be used externally by codecgen. // GenHelperEncoder is exported so that it can be used externally by codecgen.
// //
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
ge = genHelperEncoder{e: e} return genHelperEncoder{e: e}, e.e
ee = genHelperEncDriver{encDriver: e.e}
return
} }
// GenHelperDecoder is exported so that it can be used externally by codecgen. // GenHelperDecoder is exported so that it can be used externally by codecgen.
// //
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
gd = genHelperDecoder{d: d} return genHelperDecoder{d: d}, d.d
dd = genHelperDecDriver{decDriver: d.d}
return
} }
type genHelperEncDriver struct { // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
encDriver func BasicHandleDoNotUse(h Handle) *BasicHandle {
} return h.getBasicHandle()
func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
encStructFieldKey(x.encDriver, keyType, s)
}
func (x genHelperEncDriver) EncodeSymbol(s string) {
x.encDriver.EncodeString(cUTF8, s)
}
type genHelperDecDriver struct {
decDriver
C checkOverflow
}
func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
return decStructFieldKey(x.decDriver, keyType, buf)
}
func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
}
func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
}
func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
f = x.DecodeFloat64()
if chkOverflow32 && chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
}
func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
f = x.DecodeFloat64()
if chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct { type genHelperEncoder struct {
M must
e *Encoder e *Encoder
F fastpathT F fastpathT
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct { type genHelperDecoder struct {
C checkOverflow
d *Decoder d *Decoder
F fastpathT F fastpathT
} }
@ -106,12 +66,7 @@ func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool { func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding() return f.e.cf.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -124,65 +79,52 @@ func (f genHelperEncoder) EncFallback(iv interface{}) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText() bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, cUTF8) f.e.marshal(bs, fnerr, false, c_UTF8)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON() bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, cUTF8) f.e.marshal(bs, fnerr, true, c_UTF8)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary() bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, cRAW) f.e.marshal(bs, fnerr, false, c_RAW)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.rawBytes(iv)
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: builtin no longer supported - so we make this method a no-op,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
// if _, ok := f.e.hh.(*BincHandle); ok {
// return timeTypId
// }
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
return f.e.h.getExt(rtid) if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
}
return 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { func (f genHelperEncoder) IsJSONHandle() bool {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) return f.e.cf.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) HasExtensions() bool { func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0 return len(f.e.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) EncExt(v interface{}) (r bool) { func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { rt := reflect.TypeOf(v)
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := rt2id(rt)
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true return true
} }
@ -202,18 +144,15 @@ func (f genHelperDecoder) DecBinary() bool {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() } func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte { func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:] return f.d.b[:]
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback") // println(">>>>>>>>> DecFallback")
@ -221,7 +160,7 @@ func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
if chkPtr { if chkPtr {
rv = f.d.ensureDecodeable(rv) rv = f.d.ensureDecodeable(rv)
} }
f.d.decodeValue(rv, nil, false) f.d.decodeValue(rv, nil, false, false)
// f.d.decodeValueFallback(rv) // f.d.decodeValueFallback(rv)
} }
@ -267,21 +206,17 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } func (f genHelperDecoder) DecRaw() []byte {
return f.d.rawBytes()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
// Deprecated: builtin no longer supported - so we make this method a no-op, if _, ok := f.d.hh.(*BincHandle); ok {
// but leave in-place so that old generated files continue to work without regeneration. return timeTypId
func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } }
return 0
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { }
// // Note: builtin is no longer supported - so make this a no-op
// if _, ok := f.d.hh.(*BincHandle); ok {
// return timeTypId
// }
// return 0
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool { func (f genHelperDecoder) IsJSONHandle() bool {
@ -289,34 +224,15 @@ func (f genHelperDecoder) IsJSONHandle() bool {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.d.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) HasExtensions() bool { func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0 return len(f.d.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) DecExt(v interface{}) (r bool) { func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { rt := reflect.TypeOf(v).Elem()
rtid := rt2id(rt)
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true return true
} }
@ -329,7 +245,6 @@ func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) StringView(v []byte) string {
// Deprecated: no longer used, return stringView(v)
// but leave in-place so that old generated files continue to work without regeneration. }
func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
View File
@ -3,7 +3,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT. // ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec package codec
@ -18,83 +21,40 @@ const GenVersion = {{ .Version }}
// This file is used to generate helper code for codecgen. // This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by // The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice. // library users. They WILL change continuously and without notice.
// //
// To help enforce this, we create an unexported type with exported members. // To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat). // The only way to get the type is via the one exported type that we control (somewhat).
// //
// When static codecs are created for types, they will use this value // When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types. // to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen. // GenHelperEncoder is exported so that it can be used externally by codecgen.
// //
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
ge = genHelperEncoder{e: e} return genHelperEncoder{e:e}, e.e
ee = genHelperEncDriver{encDriver: e.e}
return
} }
// GenHelperDecoder is exported so that it can be used externally by codecgen. // GenHelperDecoder is exported so that it can be used externally by codecgen.
// //
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
gd = genHelperDecoder{d: d} return genHelperDecoder{d:d}, d.d
dd = genHelperDecDriver{decDriver: d.d}
return
} }
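For orientation: these gen helpers exist only for codecgen-generated files; ordinary callers of the codec package never touch them and go through the public Encoder/Decoder API instead. A minimal sketch of that ordinary path (handle choice and values are purely illustrative):

// import "github.com/ugorji/go/codec"
var out []byte
h := new(codec.JsonHandle)
if err := codec.NewEncoderBytes(&out, h).Encode(map[string]int{"a": 1}); err != nil {
	// handle encode error
}
var back map[string]int
if err := codec.NewDecoderBytes(out, h).Decode(&back); err != nil {
	// handle decode error
}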
type genHelperEncDriver struct { // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
encDriver func BasicHandleDoNotUse(h Handle) *BasicHandle {
} return h.getBasicHandle()
func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
encStructFieldKey(x.encDriver, keyType, s)
}
func (x genHelperEncDriver) EncodeSymbol(s string) {
x.encDriver.EncodeString(cUTF8, s)
}
type genHelperDecDriver struct {
decDriver
C checkOverflow
}
func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
return decStructFieldKey(x.decDriver, keyType, buf)
}
func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
}
func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
}
func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
f = x.DecodeFloat64()
if chkOverflow32 && chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
}
func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
f = x.DecodeFloat64()
if chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
} }
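The chkOvf.Float32 guard above rejects float64 values that fall outside float32 range; a quick standard-library-only illustration of what such an overflow looks like:

package main

import (
	"fmt"
	"math"
)

func main() {
	f := 1e39                        // fine as a float64
	fmt.Println(f > math.MaxFloat32) // true: outside float32 range
	fmt.Println(float32(f))          // +Inf after a plain conversion
}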
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct { type genHelperEncoder struct {
M must
e *Encoder e *Encoder
F fastpathT F fastpathT
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct { type genHelperDecoder struct {
C checkOverflow
d *Decoder d *Decoder
F fastpathT F fastpathT
} }
@ -103,13 +63,10 @@ type genHelperDecoder struct {
func (f genHelperEncoder) EncBasicHandle() *BasicHandle { func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h return f.e.h
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool { func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding() return f.e.cf.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) { func (f genHelperEncoder) EncFallback(iv interface{}) {
@ -120,61 +77,58 @@ func (f genHelperEncoder) EncFallback(iv interface{}) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText() bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, cUTF8) f.e.marshal(bs, fnerr, false, c_UTF8)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON() bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, cUTF8) f.e.marshal(bs, fnerr, true, c_UTF8)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary() bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, cRAW) f.e.marshal(bs, fnerr, false, c_RAW)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } func (f genHelperEncoder) EncRaw(iv Raw) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* f.e.rawBytes(iv)
//
// Deprecated: builtin no longer supported - so we make this method a no-op,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
// if _, ok := f.e.hh.(*BincHandle); ok {
// return timeTypId
// }
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
return f.e.h.getExt(rtid) if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
}
return 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { func (f genHelperEncoder) IsJSONHandle() bool {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) return f.e.cf.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) HasExtensions() bool { func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0 return len(f.e.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) EncExt(v interface{}) (r bool) { func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { rt := reflect.TypeOf(v)
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := rt2id(rt)
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true return true
} }
return false return false
} }
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
*/}}
// ---------------- DECODER FOLLOWS ----------------- // ---------------- DECODER FOLLOWS -----------------
@ -187,23 +141,21 @@ func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding() return f.d.be // f.d.hh.isBinaryEncoding()
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() } func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte { func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:] return f.d.b[:]
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback") // println(">>>>>>>>> DecFallback")
rv := reflect.ValueOf(iv) rv := reflect.ValueOf(iv)
if chkPtr { if chkPtr {
rv = f.d.ensureDecodeable(rv) rv = f.d.ensureDecodeable(rv)
} }
f.d.decodeValue(rv, nil, false) f.d.decodeValue(rv, nil, false, false)
// f.d.decodeValueFallback(rv) // f.d.decodeValueFallback(rv)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -242,49 +194,29 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
} }
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } func (f genHelperDecoder) DecRaw() []byte {
return f.d.rawBytes()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
// Deprecated: builtin no longer supported - so we make this method a no-op, if _, ok := f.d.hh.(*BincHandle); ok {
// but leave in-place so that old generated files continue to work without regeneration. return timeTypId
func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } }
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { return 0
// // Note: builtin is no longer supported - so make this a no-op }
// if _, ok := f.d.hh.(*BincHandle); ok {
// return timeTypId
// }
// return 0
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool { func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js return f.d.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.d.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) HasExtensions() bool { func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0 return len(f.d.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) DecExt(v interface{}) (r bool) { func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { rt := reflect.TypeOf(v).Elem()
rtid := rt2id(rt)
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true return true
} }
@ -295,8 +227,170 @@ func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit) return decInferLen(clen, maxlen, unit)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) StringView(v []byte) string {
// Deprecated: no longer used, return stringView(v)
// but leave in-place so that old generated files continue to work without regeneration. }
func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } {{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {
f.d.cr.sendContainerState(c)
}
}
*/}}
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncDriver() encDriver {
return f.e.e
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDriver() decDriver {
return f.d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncNil() {
f.e.e.EncodeNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBytes(v []byte) {
f.e.e.EncodeStringBytes(c_RAW, v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayStart(length int) {
f.e.e.EncodeArrayStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEnd() {
f.e.e.EncodeArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEntrySeparator() {
f.e.e.EncodeArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapStart(length int) {
f.e.e.EncodeMapStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEnd() {
f.e.e.EncodeMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEntrySeparator() {
f.e.e.EncodeMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapKVSeparator() {
f.e.e.EncodeMapKVSeparator()
}
// ---------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBytes(v *[]byte) {
*v = f.d.d.DecodeBytes(*v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTryNil() bool {
return f.d.d.TryDecodeAsNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsNil() (b bool) {
return f.d.d.IsContainerType(valueTypeNil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsMap() (b bool) {
return f.d.d.IsContainerType(valueTypeMap)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsArray() (b bool) {
return f.d.d.IsContainerType(valueTypeArray)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecCheckBreak() bool {
return f.d.d.CheckBreak()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapStart() int {
return f.d.d.ReadMapStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayStart() int {
return f.d.d.ReadArrayStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEnd() {
f.d.d.ReadMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEnd() {
f.d.d.ReadArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEntrySeparator() {
f.d.d.ReadArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEntrySeparator() {
f.d.d.ReadMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapKVSeparator() {
f.d.d.ReadMapKVSeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
return f.d.d.DecodeStringAsBytes(bs)
}
// -- encode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
ee := f.e.e
{{ encmd .Primitive "v" }}
}
{{ end }}{{ end }}{{ end }}
// -- decode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
dd := f.d.d
*vp = {{ decmd .Primitive }}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
dd := f.d.d
v = {{ decmd .Primitive }}
return
}
{{ end }}{{ end }}{{ end }}
// -- encode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}{{if .Slice }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
}
{{ end }}{{ end }}
// -- decode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
if changed {
*vp = v
}
}
{{ end }}{{ end }}
*/}}
View File
@ -1,5 +1,3 @@
// +build codecgen.exec
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
@ -98,7 +96,7 @@ if {{var "l"}} == 0 {
{{var "c"}} = true {{var "c"}} = true
}{{end}} }{{end}}
{{var "h"}}.ElemContainerState({{var "j"}}) {{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}} // {{var "dn"}} = r.TryDecodeAsNil()
{{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
{{ decLineVar $x }} {{ decLineVar $x }}
{{var "v"}} <- {{ $x }} {{var "v"}} <- {{ $x }}
@ -131,3 +129,4 @@ if {{var "l"}} == 0 {
}{{end}} }{{end}}
` `
File diff suppressed because it is too large.
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5 // +build go1.5
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5 // +build !go1.5
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.9 // +build go1.9
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.9 // +build !go1.9
View File
@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.10
package codec
const allowSetUnexportedEmbeddedPtr = false
View File
@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.10
package codec
const allowSetUnexportedEmbeddedPtr = true
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.4 // +build !go1.4
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5,!go1.6 // +build go1.5,!go1.6
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6,!go1.7 // +build go1.6,!go1.7
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7 // +build go1.7
View File
@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5 // +build !go1.5
File diff suppressed because it is too large.
View File
@ -6,6 +6,74 @@ package codec
// All non-std package dependencies live in this file, // All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions). // so porting to different environment is easy (just update functions).
import (
"errors"
"fmt"
"math"
"reflect"
)
func panicValToErr(panicVal interface{}, err *error) {
if panicVal == nil {
return
}
// case nil
switch xerr := panicVal.(type) {
case error:
*err = xerr
case string:
*err = errors.New(xerr)
default:
*err = fmt.Errorf("%v", panicVal)
}
return
}
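panicValToErr is the usual recover-to-error adapter; a sketch of how a helper with this signature is typically wired into a deferred recover (the wrapper name is hypothetical):

func safeCall(fn func()) (err error) {
	defer func() {
		panicValToErr(recover(), &err) // turns any panic into err
	}()
	fn()
	return
}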
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return hIsEmptyValue(v.Elem(), deref, checkStruct)
} else {
return v.IsNil()
}
case reflect.Struct:
if !checkStruct {
return false
}
// return true if all fields are empty. else return false.
// we cannot use equality check, because some fields may be maps/slices/etc
// and consequently the structs are not comparable.
// return v.Interface() == reflect.Zero(v.Type()).Interface()
for i, n := 0, v.NumField(); i < n; i++ {
if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
return false
}
}
return true
}
return false
}
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return hIsEmptyValue(v, deref, checkStruct)
}
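To make the recursion concrete, a trimmed stand-alone variant covering only a few kinds (illustrative sketch assuming import "reflect"; it is not the library's function):

func isEmptyLike(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.String, reflect.Slice, reflect.Map, reflect.Array:
		return v.Len() == 0
	case reflect.Int, reflect.Int64:
		return v.Int() == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil() || isEmptyLike(v.Elem())
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if !isEmptyLike(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

// isEmptyLike(reflect.ValueOf(struct{ Name string; N int }{}))     -> true
// isEmptyLike(reflect.ValueOf(struct{ Name string; N int }{N: 3})) -> false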
func pruneSignExt(v []byte, pos bool) (n int) { func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 { if len(v) < 2 {
} else if pos && v[0] == 0 { } else if pos && v[0] == 0 {
@ -18,6 +86,37 @@ func pruneSignExt(v []byte, pos bool) (n int) {
return return
} }
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(iTyp) {
return true, indir
}
if p := rt; p.Kind() == reflect.Ptr {
indir++
if indir >= math.MaxInt8 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Ptr {
// Not a pointer, but does the pointer work?
if reflect.PtrTo(typ).Implements(iTyp) {
return true, -1
}
}
return false, 0
}
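The final reflect.PtrTo check above covers types whose pointer, but not the base type, satisfies the interface; a small runnable illustration with encoding.TextMarshaler (the hostname type is hypothetical):

package main

import (
	"encoding"
	"fmt"
	"reflect"
)

type hostname string // method is defined on the pointer receiver only

func (h *hostname) MarshalText() ([]byte, error) { return []byte(*h), nil }

func main() {
	tm := reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	base := reflect.TypeOf(hostname(""))
	fmt.Println(base.Implements(tm))                // false: method is on the pointer
	fmt.Println(reflect.PtrTo(base).Implements(tm)) // true: the case the -1 indir return covers
}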
// validate that this function is correct ... // validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine) // culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) // function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
@ -30,20 +129,21 @@ func halfFloatToFloatBits(yy uint16) (d uint32) {
if e == 0 { if e == 0 {
if m == 0 { // plus or minus 0 if m == 0 { // plus or minus 0
return s << 31 return s << 31
} else { // Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
} }
// Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
} else if e == 31 { } else if e == 31 {
if m == 0 { // Inf if m == 0 { // Inf
return (s << 31) | 0x7f800000 return (s << 31) | 0x7f800000
} else { // NaN
return (s << 31) | 0x7f800000 | (m << 13)
} }
return (s << 31) | 0x7f800000 | (m << 13) // NaN
} }
e = e + (127 - 15) e = e + (127 - 15)
m = m << 13 m = m << 13
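For readers checking the bit arithmetic: a self-contained sketch of the normalized-value path, using the same shifts as above (it skips the denormal/Inf/NaN branches; names are illustrative):

package main

import (
	"fmt"
	"math"
)

// halfToFloat32 converts an IEEE 754 half-precision bit pattern, for
// normalized inputs only.
func halfToFloat32(h uint16) float32 {
	s := uint32(h>>15) & 0x1
	e := uint32(h>>10) & 0x1f
	m := uint32(h) & 0x3ff
	return math.Float32frombits((s << 31) | ((e + 127 - 15) << 23) | (m << 13))
}

func main() {
	fmt.Println(halfToFloat32(0x3C00)) // 1
	fmt.Println(halfToFloat32(0xC000)) // -2
}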
View File
@ -1,6 +1,6 @@
// +build !go1.7 safe appengine // +build !go1.7 safe appengine
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -8,11 +8,8 @@ package codec
import ( import (
"reflect" "reflect"
"sync/atomic" "sync/atomic"
"time"
) )
const safeMode = true
// stringView returns a view of the []byte as a string. // stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion. // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy. // In regular safe mode, it is an allocation and copy.
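In this safe build the two views reduce to ordinary conversions; a sketch of the generic safe pattern (function names are hypothetical, not a quote of the elided lines):

func stringViewSafe(v []byte) string { return string(v) } // allocates and copies
func bytesViewSafe(v string) []byte  { return []byte(v) } // allocates and copies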
@ -34,11 +31,28 @@ func bytesView(v string) []byte {
} }
func definitelyNil(v interface{}) bool { func definitelyNil(v interface{}) bool {
// this is a best-effort option.
// We just return false, so we don't unnecessarily incur the cost of reflection this early.
return false return false
// rv := reflect.ValueOf(v)
// switch rv.Kind() {
// case reflect.Invalid:
// return true
// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
// return rv.IsNil()
// default:
// return false
// }
} }
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
// //
// // Usage: call this at point where done with the bytes view.
// func keepAlive4BytesView(v string) {}
// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
// //
// // Usage: call this at point where done with the string view.
// func keepAlive4StringView(v []byte) {}
func rv2i(rv reflect.Value) interface{} { func rv2i(rv reflect.Value) interface{} {
return rv.Interface() return rv.Interface()
} }
@ -51,50 +65,16 @@ func rv2rtid(rv reflect.Value) uintptr {
return reflect.ValueOf(rv.Type()).Pointer() return reflect.ValueOf(rv.Type()).Pointer()
} }
func i2rtid(i interface{}) uintptr {
return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
}
// --------------------------
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return v.IsNil()
case reflect.Struct:
return isEmptyStruct(v, tinfos, deref, checkStruct)
}
return false
}
// -------------------------- // --------------------------
// type ptrToRvMap struct{} // type ptrToRvMap struct{}
// func (*ptrToRvMap) init() {} // func (_ *ptrToRvMap) init() {}
// func (*ptrToRvMap) get(i interface{}) reflect.Value { // func (_ *ptrToRvMap) get(i interface{}) reflect.Value {
// return reflect.ValueOf(i).Elem() // return reflect.ValueOf(i).Elem()
// } // }
// -------------------------- // --------------------------
type atomicTypeInfoSlice struct { // expected to be 2 words type atomicTypeInfoSlice struct {
v atomic.Value v atomic.Value
} }
@ -123,150 +103,54 @@ func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
rv.SetBool(d.d.DecodeBool()) rv.SetBool(d.d.DecodeBool())
} }
func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
rv.Set(reflect.ValueOf(d.d.DecodeTime()))
}
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
fv := d.d.DecodeFloat64() rv.SetFloat(d.d.DecodeFloat(true))
if chkOvf.Float32(fv) {
d.errorf("float32 overflow: %v", fv)
}
rv.SetFloat(fv)
} }
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
rv.SetFloat(d.d.DecodeFloat64()) rv.SetFloat(d.d.DecodeFloat(false))
} }
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) rv.SetInt(d.d.DecodeInt(intBitsize))
} }
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) rv.SetInt(d.d.DecodeInt(8))
} }
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) rv.SetInt(d.d.DecodeInt(16))
} }
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) rv.SetInt(d.d.DecodeInt(32))
} }
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt64()) rv.SetInt(d.d.DecodeInt(64))
} }
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) rv.SetUint(d.d.DecodeUint(uintBitsize))
} }
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) rv.SetUint(d.d.DecodeUint(uintBitsize))
} }
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) rv.SetUint(d.d.DecodeUint(8))
} }
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) rv.SetUint(d.d.DecodeUint(16))
} }
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) rv.SetUint(d.d.DecodeUint(32))
} }
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint64()) rv.SetUint(d.d.DecodeUint(64))
} }
// ----------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeBool(rv.Bool())
}
func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeTime(rv2i(rv).(time.Time))
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeString(cUTF8, rv.String())
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat64(rv.Float())
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat32(float32(rv.Float()))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
// //
// // Usage: call this at point where done with the bytes view.
// func keepAlive4BytesView(v string) {}
// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
// //
// // Usage: call this at point where done with the string view.
// func keepAlive4StringView(v []byte) {}
// func definitelyNil(v interface{}) bool {
// rv := reflect.ValueOf(v)
// switch rv.Kind() {
// case reflect.Invalid:
// return true
// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
// return rv.IsNil()
// default:
// return false
// }
// }
View File
@ -2,7 +2,7 @@
// +build !appengine // +build !appengine
// +build go1.7 // +build go1.7
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -10,7 +10,6 @@ package codec
import ( import (
"reflect" "reflect"
"sync/atomic" "sync/atomic"
"time"
"unsafe" "unsafe"
) )
@ -19,7 +18,6 @@ import (
// var zeroRTv [4]uintptr // var zeroRTv [4]uintptr
const safeMode = false
const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
type unsafeString struct { type unsafeString struct {
@ -48,49 +46,50 @@ func stringView(v []byte) string {
if len(v) == 0 { if len(v) == 0 {
return "" return ""
} }
bx := (*unsafeSlice)(unsafe.Pointer(&v)) bx := (*unsafeSlice)(unsafe.Pointer(&v))
return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len})) sx := unsafeString{bx.Data, bx.Len}
return *(*string)(unsafe.Pointer(&sx))
} }
func bytesView(v string) []byte { func bytesView(v string) []byte {
if len(v) == 0 { if len(v) == 0 {
return zeroByteSlice return zeroByteSlice
} }
sx := (*unsafeString)(unsafe.Pointer(&v)) sx := (*unsafeString)(unsafe.Pointer(&v))
return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len})) bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
return *(*[]byte)(unsafe.Pointer(&bx))
} }
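The header-sharing trick above is the standard zero-copy conversion for this era of Go; a generic stand-alone sketch (names hypothetical; the caller must never mutate bytes that now back a string):

import (
	"reflect"
	"unsafe"
)

func b2s(b []byte) string { // shares the backing array, no copy
	return *(*string)(unsafe.Pointer(&b))
}

func s2b(s string) (b []byte) { // shares the backing array, no copy
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data, bh.Len, bh.Cap = sh.Data, sh.Len, sh.Len
	return
}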
func definitelyNil(v interface{}) bool { func definitelyNil(v interface{}) bool {
// There is no global way of checking if an interface is nil. return (*unsafeIntf)(unsafe.Pointer(&v)).word == nil
// For true references (map, ptr, func, chan), you can just look
// at the word of the interface. However, for slices, you have to dereference
// the word, and get a pointer to the 3-word interface value.
//
// However, the following are cheap calls
// - TypeOf(interface): cheap 2-line call.
// - ValueOf(interface{}): expensive
// - type.Kind: cheap call through an interface
// - Value.Type(): cheap call
// except it's a method value (e.g. r.Read, which implies that it is a Func)
return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
} }
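The reason a dedicated nil check exists at all is Go's typed-nil-in-interface behaviour; a quick illustration (assumes imports bytes, fmt, reflect):

var p *bytes.Buffer                     // nil pointer
var v interface{} = p                   // interface now holds (type=*bytes.Buffer, data=nil)
fmt.Println(v == nil)                   // false: the interface itself is not nil
fmt.Println(reflect.ValueOf(v).IsNil()) // true: the nil data word that definitelyNil detects cheaply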
func rv2i(rv reflect.Value) interface{} { // func keepAlive4BytesView(v string) {
// TODO: consider a more generally-known optimization for reflect.Value ==> Interface // runtime.KeepAlive(v)
// // }
// Currently, we use this fragile method that taps into implememtation details from
// the source go stdlib reflect/value.go, and trims the implementation.
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) // func keepAlive4StringView(v []byte) {
// true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir // runtime.KeepAlive(v)
var ptr unsafe.Pointer // }
if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
ptr = *(*unsafe.Pointer)(urv.ptr) // TODO: consider a more generally-known optimization for reflect.Value ==> Interface
} else { //
ptr = urv.ptr // Currently, we use this fragile method that taps into implementation details from
// the source go stdlib reflect/value.go,
// and trims the implementation.
func rv2i(rv reflect.Value) interface{} {
if false {
return rv.Interface()
} }
return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// references that are single-words (map, ptr) may be double-referenced as flagIndir
kk := urv.flag & (1<<5 - 1)
if (kk == uintptr(reflect.Map) || kk == uintptr(reflect.Ptr)) && urv.flag&unsafeFlagIndir != 0 {
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
}
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
} }
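For contrast, the safe path that this unsafe shortcut replaces is just reflect.Value.Interface (values illustrative):

rv := reflect.ValueOf(map[string]int{"a": 1})
i := rv.Interface()      // what rv2i produces, minus the allocation-avoidance tricks
m := i.(map[string]int)
fmt.Println(m["a"])      // 1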
func rt2id(rt reflect.Type) uintptr { func rt2id(rt reflect.Type) uintptr {
@ -101,83 +100,16 @@ func rv2rtid(rv reflect.Value) uintptr {
return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
} }
func i2rtid(i interface{}) uintptr { // func rv0t(rt reflect.Type) reflect.Value {
return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ) // ut := (*unsafeIntf)(unsafe.Pointer(&rt))
} // // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
// return *(*reflect.Value)(unsafe.Pointer(&uv})
// }
// -------------------------- // --------------------------
type atomicTypeInfoSlice struct {
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
if urv.flag == 0 {
return true
}
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.String:
return (*unsafeString)(urv.ptr).Len == 0
case reflect.Slice:
return (*unsafeSlice)(urv.ptr).Len == 0
case reflect.Bool:
return !*(*bool)(urv.ptr)
case reflect.Int:
return *(*int)(urv.ptr) == 0
case reflect.Int8:
return *(*int8)(urv.ptr) == 0
case reflect.Int16:
return *(*int16)(urv.ptr) == 0
case reflect.Int32:
return *(*int32)(urv.ptr) == 0
case reflect.Int64:
return *(*int64)(urv.ptr) == 0
case reflect.Uint:
return *(*uint)(urv.ptr) == 0
case reflect.Uint8:
return *(*uint8)(urv.ptr) == 0
case reflect.Uint16:
return *(*uint16)(urv.ptr) == 0
case reflect.Uint32:
return *(*uint32)(urv.ptr) == 0
case reflect.Uint64:
return *(*uint64)(urv.ptr) == 0
case reflect.Uintptr:
return *(*uintptr)(urv.ptr) == 0
case reflect.Float32:
return *(*float32)(urv.ptr) == 0
case reflect.Float64:
return *(*float64)(urv.ptr) == 0
case reflect.Interface:
isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
if deref {
if isnil {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return isnil
case reflect.Ptr:
isnil := urv.ptr == nil
if deref {
if isnil {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return isnil
case reflect.Struct:
return isEmptyStruct(v, tinfos, deref, checkStruct)
case reflect.Map, reflect.Array, reflect.Chan:
return v.Len() == 0
}
return false
}
// --------------------------
type atomicTypeInfoSlice struct { // expected to be 2 words
v unsafe.Pointer v unsafe.Pointer
_ [8]byte // padding
} }
func (x *atomicTypeInfoSlice) load() *[]rtid2ti { func (x *atomicTypeInfoSlice) load() *[]rtid2ti {
@ -191,6 +123,9 @@ func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
// -------------------------- // --------------------------
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// if urv.flag&unsafeFlagIndir != 0 {
// urv.ptr = *(*unsafe.Pointer)(urv.ptr)
// }
*(*[]byte)(urv.ptr) = d.rawBytes() *(*[]byte)(urv.ptr) = d.rawBytes()
} }
@ -204,214 +139,73 @@ func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
*(*bool)(urv.ptr) = d.d.DecodeBool() *(*bool)(urv.ptr) = d.d.DecodeBool()
} }
func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*time.Time)(urv.ptr) = d.d.DecodeTime()
}
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
fv := d.d.DecodeFloat64()
if chkOvf.Float32(fv) {
d.errorf("float32 overflow: %v", fv)
}
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = float32(fv) *(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true))
} }
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = d.d.DecodeFloat64() *(*float64)(urv.ptr) = d.d.DecodeFloat(false)
} }
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) *(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize))
} }
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) *(*int8)(urv.ptr) = int8(d.d.DecodeInt(8))
} }
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) *(*int16)(urv.ptr) = int16(d.d.DecodeInt(16))
} }
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) *(*int32)(urv.ptr) = int32(d.d.DecodeInt(32))
} }
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = d.d.DecodeInt64() *(*int64)(urv.ptr) = d.d.DecodeInt(64)
} }
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) *(*uint)(urv.ptr) = uint(d.d.DecodeUint(uintBitsize))
} }
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) *(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize))
} }
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) *(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8))
} }
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) *(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16))
} }
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) *(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32))
} }
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = d.d.DecodeUint64() *(*uint64)(urv.ptr) = d.d.DecodeUint(64)
} }
// ------------ // ------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeBool(*(*bool)(v.ptr))
}
func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeTime(*(*time.Time)(v.ptr))
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeString(cUTF8, *(*string)(v.ptr))
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat64(*(*float64)(v.ptr))
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat32(*(*float32)(v.ptr))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int)(v.ptr)))
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int8)(v.ptr)))
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int16)(v.ptr)))
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int32)(v.ptr)))
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int64)(v.ptr)))
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
}
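The helpers above all use the same trick: reinterpret a reflect.Value through a struct whose leading fields match the runtime's layout, then dereference the data pointer as the concrete type, skipping the usual reflect call overhead. A minimal standalone sketch of the idea follows; the unsafeValueHeader name is illustrative, and its field layout is an assumption about reflect.Value, the same assumption the package's unsafeReflectValue relies on.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// unsafeValueHeader mirrors the leading fields of reflect.Value (assumed layout).
type unsafeValueHeader struct {
	typ  unsafe.Pointer
	ptr  unsafe.Pointer
	flag uintptr
}

func main() {
	b := true
	rv := reflect.ValueOf(&b).Elem() // addressable, so ptr points at b
	h := (*unsafeValueHeader)(unsafe.Pointer(&rv))
	fmt.Println(*(*bool)(h.ptr)) // reads b directly, without calling rv.Bool()
}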
// ------------
// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // if urv.flag&unsafeFlagIndir != 0 {
// // urv.ptr = *(*unsafe.Pointer)(urv.ptr)
// // }
// *(*[]byte)(urv.ptr) = d.rawBytes()
// }
// func rv0t(rt reflect.Type) reflect.Value {
// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
// return *(*reflect.Value)(unsafe.Pointer(&uv))
// }
// func rv2i(rv reflect.Value) interface{} {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
// var ptr unsafe.Pointer
// // kk := reflect.Kind(urv.flag & (1<<5 - 1))
// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
// ptr = *(*unsafe.Pointer)(urv.ptr)
// } else {
// ptr = urv.ptr
// }
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// func definitelyNil(v interface{}) bool {
// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
// if ui.word == nil {
// return true
// }
// var tk = reflect.TypeOf(v).Kind()
// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n",
// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
// }
// func keepAlive4BytesView(v string) {
// runtime.KeepAlive(v)
// }
// func keepAlive4StringView(v []byte) {
// runtime.KeepAlive(v)
// }
// func rt2id(rt reflect.Type) uintptr { // func rt2id(rt reflect.Type) uintptr {
// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) // return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
// // var i interface{} = rt // // var i interface{} = rt

File diff suppressed because it is too large

View File

@ -1,21 +1,18 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. // ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from mammoth-test.go.tmpl
// ************************************************************
package codec package codec
import "testing"
import "fmt"
import "reflect"
// TestMammoth has all the different paths optimized in fast-path // TestMammoth has all the different paths optimized in fast-path
// It has all the primitives, slices and maps. // It has all the primitives, slices and maps.
// //
// For each of those types, it has a pointer and a non-pointer field. // For each of those types, it has a pointer and a non-pointer field.
func init() { _ = fmt.Printf } // so we can include fmt as needed
type TestMammoth struct { type TestMammoth struct {
{{range .Values }}{{if .Primitive }}{{/* {{range .Values }}{{if .Primitive }}{{/*
@ -34,121 +31,3 @@ type TestMammoth struct {
{{end}}{{end}}{{end}} {{end}}{{end}}{{end}}
} }
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
func doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}
var v{{$i}}va [8]{{ .Elem }}
for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/*
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
// - encode value to some []byte
// - decode into a length-wise-equal []byte
// - check if equal to initial slice
// - encode ptr to the value
// - check if encode bytes are same
// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
// - decode into non-addressable slice of equal length, then larger len
// - for each decode, compare elem-by-elem to the original slice
// -
// - rinse and repeat for a MapBySlice version
// -
*/}}
var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
v{{$i}}v1 = v
bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
// ...
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:1:1]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
if len(v{{$i}}v1) > 1 {
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
}
// ...
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
v{{$i}}v2 = nil
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v2 = nil
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
}
{{end}}{{end}}{{end}}
}
func doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
// fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
v{{$i}}v1 = v
bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
// ...
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
}
{{end}}{{end}}{{end}}
}
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
doTestMammothSlices(t, h)
doTestMammothMaps(t, h)
}

View File

@ -1,94 +0,0 @@
// +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
package codec
// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go.... // Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go....
//
// Add:
// - test file for creating a mammoth generated file as _mammoth_generated.go
// - generate a second mammoth files in a different file: mammoth2_generated_test.go
// - mammoth-test.go.tmpl will do this
// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
// - as part of TestMammoth, run it also
// - this will cover all the codecgen, gen-helper, etc in one full run
// - check in mammoth* files into github also
// - then
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
// import "encoding/binary"
import "fmt"
type TestMammoth2 struct {
{{range .Values }}{{if .Primitive }}{{/*
*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
// -----------
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigen.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigen.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth2
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth2
L []TestMammoth2
A [4]int64
}

View File

@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
/* /*
@ -15,8 +15,8 @@ For compatibility with behaviour of msgpack-c reference implementation:
- Go intX (<0) - Go intX (<0)
IS ENCODED AS IS ENCODED AS
msgpack -ve fixnum, signed msgpack -ve fixnum, signed
*/
*/
package codec package codec
import ( import (
@ -25,7 +25,6 @@ import (
"math" "math"
"net/rpc" "net/rpc"
"reflect" "reflect"
"time"
) )
const ( const (
@ -79,9 +78,6 @@ const (
mpNegFixNumMax = 0xff mpNegFixNumMax = 0xff
) )
var mpTimeExtTag int8 = -1
var mpTimeExtTagU = uint8(mpTimeExtTag)
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec // MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged // that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice. // in sequence in the slice.
@ -98,18 +94,10 @@ type msgpackContainerType struct {
} }
var ( var (
msgpackContainerStr = msgpackContainerType{ msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false, msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
} msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
msgpackContainerBin = msgpackContainerType{ msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
0, 0, mpBin8, mpBin16, mpBin32, false, true, true,
}
msgpackContainerList = msgpackContainerType{
16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false,
}
msgpackContainerMap = msgpackContainerType{
16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false,
}
) )
//--------------------------------------------- //---------------------------------------------
@ -122,7 +110,6 @@ type msgpackEncDriver struct {
w encWriter w encWriter
h *MsgpackHandle h *MsgpackHandle
x [8]byte x [8]byte
_ [3]uint64 // padding
} }
func (e *msgpackEncDriver) EncodeNil() { func (e *msgpackEncDriver) EncodeNil() {
@ -130,26 +117,10 @@ func (e *msgpackEncDriver) EncodeNil() {
} }
func (e *msgpackEncDriver) EncodeInt(i int64) { func (e *msgpackEncDriver) EncodeInt(i int64) {
// if i >= 0 { if i >= 0 {
// e.EncodeUint(uint64(i)) e.EncodeUint(uint64(i))
// } else if false &&
if i > math.MaxInt8 {
if i <= math.MaxInt16 {
e.w.writen1(mpInt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i <= math.MaxInt32 {
e.w.writen1(mpInt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpInt64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
} else if i >= -32 { } else if i >= -32 {
if e.h.NoFixedNum { e.w.writen1(byte(i))
e.w.writen2(mpInt8, byte(i))
} else {
e.w.writen1(byte(i))
}
} else if i >= math.MinInt8 { } else if i >= math.MinInt8 {
e.w.writen2(mpInt8, byte(i)) e.w.writen2(mpInt8, byte(i))
} else if i >= math.MinInt16 { } else if i >= math.MinInt16 {
@ -166,11 +137,7 @@ func (e *msgpackEncDriver) EncodeInt(i int64) {
func (e *msgpackEncDriver) EncodeUint(i uint64) { func (e *msgpackEncDriver) EncodeUint(i uint64) {
if i <= math.MaxInt8 { if i <= math.MaxInt8 {
if e.h.NoFixedNum { e.w.writen1(byte(i))
e.w.writen2(mpUint8, byte(i))
} else {
e.w.writen1(byte(i))
}
} else if i <= math.MaxUint8 { } else if i <= math.MaxUint8 {
e.w.writen2(mpUint8, byte(i)) e.w.writen2(mpUint8, byte(i))
} else if i <= math.MaxUint16 { } else if i <= math.MaxUint16 {
@ -203,39 +170,6 @@ func (e *msgpackEncDriver) EncodeFloat64(f float64) {
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
} }
func (e *msgpackEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
return
}
t = t.UTC()
sec, nsec := t.Unix(), uint64(t.Nanosecond())
var data64 uint64
var l = 4
if sec >= 0 && sec>>34 == 0 {
data64 = (nsec << 34) | uint64(sec)
if data64&0xffffffff00000000 != 0 {
l = 8
}
} else {
l = 12
}
if e.h.WriteExt {
e.encodeExtPreamble(mpTimeExtTagU, l)
} else {
e.writeContainerLen(msgpackContainerStr, l)
}
switch l {
case 4:
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64))
case 8:
bigenHelper{e.x[:8], e.w}.writeUint64(data64)
case 12:
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec))
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec))
}
}
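For reference, the length chosen above follows the msgpack timestamp extension: 4 bytes when the seconds fit in 32 bits and there are no nanoseconds, 8 bytes when the seconds fit in 34 bits, and 12 bytes otherwise. A small sketch of that decision, mirroring the logic in EncodeTime (timestampLen is an illustrative helper, not part of the package; it uses the standard time package):

// timestampLen reports how many payload bytes EncodeTime would emit for t.
func timestampLen(t time.Time) int {
	t = t.UTC()
	sec, nsec := t.Unix(), uint64(t.Nanosecond())
	if sec >= 0 && sec>>34 == 0 {
		// nsec goes in the high 30 bits, sec in the low 34 bits
		if (nsec<<34|uint64(sec))&0xffffffff00000000 != 0 {
			return 8
		}
		return 4 // no nanoseconds and sec fits in 32 bits
	}
	return 12 // full 64-bit seconds plus 32-bit nanoseconds
}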
func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(v) bs := ext.WriteExt(v)
if bs == nil { if bs == nil {
@ -246,7 +180,7 @@ func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Enc
e.encodeExtPreamble(uint8(xtag), len(bs)) e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs) e.w.writeb(bs)
} else { } else {
e.EncodeStringBytes(cRAW, bs) e.EncodeStringBytes(c_RAW, bs)
} }
} }
@ -290,7 +224,7 @@ func (e *msgpackEncDriver) WriteMapStart(length int) {
func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
slen := len(s) slen := len(s)
if c == cRAW && e.h.WriteExt { if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, slen) e.writeContainerLen(msgpackContainerBin, slen)
} else { } else {
e.writeContainerLen(msgpackContainerStr, slen) e.writeContainerLen(msgpackContainerStr, slen)
@ -300,13 +234,13 @@ func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
} }
} }
func (e *msgpackEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
if bs == nil {
e.EncodeNil()
return
}
slen := len(bs) slen := len(bs)
if c == cRAW && e.h.WriteExt { if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, slen) e.writeContainerLen(msgpackContainerBin, slen)
} else { } else {
e.writeContainerLen(msgpackContainerStr, slen) e.writeContainerLen(msgpackContainerStr, slen)
@ -333,10 +267,10 @@ func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
//--------------------------------------------- //---------------------------------------------
type msgpackDecDriver struct { type msgpackDecDriver struct {
d *Decoder d *Decoder
r decReader // *Decoder decReader decReaderT r decReader // *Decoder decReader decReaderT
h *MsgpackHandle h *MsgpackHandle
// b [scratchByteArrayLen]byte b [scratchByteArrayLen]byte
bd byte bd byte
bdRead bool bdRead bool
br bool // bytes reader br bool // bytes reader
@ -344,7 +278,6 @@ type msgpackDecDriver struct {
// noStreamingCodec // noStreamingCodec
// decNoSeparator // decNoSeparator
decDriverNoopContainerReader decDriverNoopContainerReader
_ [3]uint64 // padding
} }
// Note: This returns either a primitive (int, bool, etc) for non-containers, // Note: This returns either a primitive (int, bool, etc) for non-containers,
@ -435,12 +368,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
n.v = valueTypeExt n.v = valueTypeExt
clen := d.readExtLen() clen := d.readExtLen()
n.u = uint64(d.r.readn1()) n.u = uint64(d.r.readn1())
if n.u == uint64(mpTimeExtTagU) { n.l = d.r.readx(clen)
n.v = valueTypeTime
n.t = d.decodeTime(clen)
} else {
n.l = d.r.readx(clen)
}
default: default:
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
} }
@ -456,7 +384,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
} }
// int can be decoded from msgpack type: intXXX or uintXXX // int can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeInt64() (i int64) { func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -488,12 +416,19 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) {
return return
} }
} }
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
d.d.errorf("Overflow int value: %v", i)
return
}
}
d.bdRead = false d.bdRead = false
return return
} }
// uint can be decoded from msgpack type: intXXX or uintXXX // uint can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -546,12 +481,19 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
return return
} }
} }
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
d.d.errorf("Overflow uint value: %v", ui)
return
}
}
d.bdRead = false d.bdRead = false
return return
} }
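The overflow guard added in both DecodeInt and DecodeUint uses a shift round-trip: pushing the value left by (64 - bitsize) and back again drops any bits beyond the target width, so a changed result means the value did not fit. A tiny illustration (overflowsUint is a hypothetical name used only for this sketch):

// overflowsUint reports whether v cannot be represented in bitsize bits.
func overflowsUint(v uint64, bitsize uint8) bool {
	trunc := (v << (64 - bitsize)) >> (64 - bitsize)
	return v != trunc
}

// overflowsUint(300, 8) == true  (300 truncates to 44)
// overflowsUint(255, 8) == false (255 fits in a byte)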
// float can either be decoded from msgpack type: float, double or intX // float can either be decoded from msgpack type: float, double or intX
func (d *msgpackDecDriver) DecodeFloat64() (f float64) { func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -560,7 +502,11 @@ func (d *msgpackDecDriver) DecodeFloat64() (f float64) {
} else if d.bd == mpDouble { } else if d.bd == mpDouble {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else { } else {
f = float64(d.DecodeInt64()) f = float64(d.DecodeInt(0))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
} }
d.bdRead = false d.bdRead = false
return return
@ -588,15 +534,13 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
d.readNextBd() d.readNextBd()
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
bd := d.bd
// DecodeBytes could be from: bin str fixstr fixarray array ... // DecodeBytes could be from: bin str fixstr fixarray array ...
var clen int var clen int
vt := d.ContainerType() vt := d.ContainerType()
switch vt { switch vt {
case valueTypeBytes: case valueTypeBytes:
// valueTypeBytes may be a mpBin or an mpStr container // valueTypeBytes may be a mpBin or an mpStr container
if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin) clen = d.readContainerLen(msgpackContainerBin)
} else { } else {
clen = d.readContainerLen(msgpackContainerStr) clen = d.readContainerLen(msgpackContainerStr)
@ -604,17 +548,28 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
case valueTypeString: case valueTypeString:
clen = d.readContainerLen(msgpackContainerStr) clen = d.readContainerLen(msgpackContainerStr)
case valueTypeArray: case valueTypeArray:
if zerocopy && len(bs) == 0 { clen = d.readContainerLen(msgpackContainerList)
bs = d.d.b[:] // ensure everything after is one byte each
for i := 0; i < clen; i++ {
d.readNextBd()
if d.bd == mpNil {
bs = append(bs, 0)
} else if d.bd == mpUint8 {
bs = append(bs, d.r.readn1())
} else {
d.d.errorf("cannot read non-byte into a byte array")
return
}
} }
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) d.bdRead = false
return return bs
default: default:
d.d.errorf("invalid container type: expecting bin|str|array, got: 0x%x", uint8(vt)) d.d.errorf("invalid container type: expecting bin|str|array")
return return
} }
// these are (bin|str)(8|16|32) // these are (bin|str)(8|16|32)
// println("DecodeBytes: clen: ", clen)
d.bdRead = false d.bdRead = false
// bytes may be nil, so handle it. if nil, clen=-1. // bytes may be nil, so handle it. if nil, clen=-1.
if clen < 0 { if clen < 0 {
@ -624,18 +579,18 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
if d.br { if d.br {
return d.r.readx(clen) return d.r.readx(clen)
} else if len(bs) == 0 { } else if len(bs) == 0 {
bs = d.d.b[:] bs = d.b[:]
} }
} }
return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
} }
func (d *msgpackDecDriver) DecodeString() (s string) { func (d *msgpackDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.d.b[:], true)) return string(d.DecodeBytes(d.b[:], true))
} }
func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true) return d.DecodeBytes(d.b[:], true)
} }
func (d *msgpackDecDriver) readNextBd() { func (d *msgpackDecDriver) readNextBd() {
@ -668,10 +623,9 @@ func (d *msgpackDecDriver) ContainerType() (vt valueType) {
return valueTypeArray return valueTypeArray
} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
return valueTypeMap return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -681,7 +635,7 @@ func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
} }
if d.bd == mpNil { if d.bd == mpNil {
d.bdRead = false d.bdRead = false
return true v = true
} }
return return
} }
@ -747,57 +701,6 @@ func (d *msgpackDecDriver) readExtLen() (clen int) {
return return
} }
func (d *msgpackDecDriver) DecodeTime() (t time.Time) {
// decode time from string bytes or ext
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpNil {
d.bdRead = false
return
}
var clen int
switch d.ContainerType() {
case valueTypeBytes, valueTypeString:
clen = d.readContainerLen(msgpackContainerStr)
default:
// expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1
d.bdRead = false
b2 := d.r.readn1()
if d.bd == mpFixExt4 && b2 == mpTimeExtTagU {
clen = 4
} else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU {
clen = 8
} else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU {
clen = 12
} else {
d.d.errorf("invalid bytes for decoding time as extension: got 0x%x, 0x%x", d.bd, b2)
return
}
}
return d.decodeTime(clen)
}
func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
// bs = d.r.readx(clen)
d.bdRead = false
switch clen {
case 4:
t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC()
case 8:
tv := bigen.Uint64(d.r.readx(8))
t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC()
case 12:
nsec := bigen.Uint32(d.r.readx(4))
sec := bigen.Uint64(d.r.readx(8))
t = time.Unix(int64(sec), int64(nsec)).UTC()
default:
d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen)
return
}
return
}
func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff { if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
@ -847,9 +750,6 @@ type MsgpackHandle struct {
// RawToString controls how raw bytes are decoded into a nil interface{}. // RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool RawToString bool
// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
NoFixedNum bool
// WriteExt flag supports encoding configured extensions with extension tags. // WriteExt flag supports encoding configured extensions with extension tags.
// It also controls whether other elements of the new spec are encoded (ie Str8). // It also controls whether other elements of the new spec are encoded (ie Str8).
// //
@ -861,19 +761,12 @@ type MsgpackHandle struct {
// type is provided (e.g. decoding into a nil interface{}), you get back // type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString. // a []byte or string based on the setting of RawToString.
WriteExt bool WriteExt bool
binaryEncodingType binaryEncodingType
noElemSeparators noElemSeparators
_ [1]uint64 // padding
} }
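A minimal usage sketch for the handle options documented above, written from a consuming package and assuming this codec package's NewEncoderBytes/NewDecoderBytes constructors:

var mh codec.MsgpackHandle
mh.WriteExt = true    // emit bin/str8 forms and extension tags from the newer spec
mh.RawToString = true // decode raw bytes to string when the target is interface{}
mh.NoFixedNum = true  // new option above: never emit 1-byte fixnums for small ints

var buf []byte
if err := codec.NewEncoderBytes(&buf, &mh).Encode(map[string]int{"a": 1}); err != nil {
	// handle the error
}
var out map[string]int
if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
	// handle the error
}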
// Name returns the name of the handle: msgpack
func (h *MsgpackHandle) Name() string { return "msgpack" }
// SetBytesExt sets an extension
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) return h.SetExt(rt, tag, &setExtWrapper{b: ext})
} }
func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
@ -911,7 +804,7 @@ func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) err
bodyArr = []interface{}{body} bodyArr = []interface{}{body}
} }
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
return c.write(r2, nil, false) return c.write(r2, nil, false, true)
} }
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
@ -923,7 +816,7 @@ func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) e
body = nil body = nil
} }
r2 := []interface{}{1, uint32(r.Seq), moe, body} r2 := []interface{}{1, uint32(r.Seq), moe, body}
return c.write(r2, nil, false) return c.write(r2, nil, false, true)
} }
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
@ -943,6 +836,7 @@ func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
} }
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.isClosed() { if c.isClosed() {
return io.EOF return io.EOF
} }
@ -956,34 +850,28 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint
// err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
// return // return
// } // }
var ba [1]byte var b byte
var n int b, err = c.br.ReadByte()
for { if err != nil {
n, err = c.r.Read(ba[:]) return
if err != nil {
return
}
if n == 1 {
break
}
} }
var b = ba[0]
if b != fia { if b != fia {
err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
} else { return
err = c.read(&b) }
if err == nil {
if b != expectTypeByte { if err = c.read(&b); err != nil {
err = fmt.Errorf("Unexpected byte descriptor. Expecting %v; Received %v", return
expectTypeByte, b) }
} else { if b != expectTypeByte {
err = c.read(msgid) err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
if err == nil { return
err = c.read(methodOrError) }
} if err = c.read(msgid); err != nil {
} return
} }
if err = c.read(methodOrError); err != nil {
return
} }
return return
} }
@ -996,8 +884,7 @@ type msgpackSpecRpc struct{}
// MsgpackSpecRpc implements Rpc using the communication protocol defined in // MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . // the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
// // Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
// See GoRpc documentation for information on buffering for better performance.
var MsgpackSpecRpc msgpackSpecRpc var MsgpackSpecRpc msgpackSpecRpc
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {

View File

@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// +build ignore // +build ignore
@ -91,9 +91,8 @@ func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
func (h *noopDrv) EncodeEnd() { h.end() } func (h *noopDrv) EncodeEnd() { h.end() }
func (h *noopDrv) EncodeString(c charEncoding, v string) {} func (h *noopDrv) EncodeString(c charEncoding, v string) {}
func (h *noopDrv) EncodeSymbol(v string) {}
// func (h *noopDrv) EncodeSymbol(v string) {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
@ -120,12 +119,9 @@ func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
func (h *noopDrv) ContainerType() (vt valueType) { func (h *noopDrv) ContainerType() (vt valueType) {
// return h.m(2) == 0 // return h.m(2) == 0
// handle kStruct, which will bomb if it calls this and // handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
// doesn't get back a map or array. // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
// consequently, if the return value is not map or array, // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
// reset it to one of them based on h.m(7) % 2
// for kstruct: at least one out of every 2 times,
// return one of valueTypeMap or Array (else kstruct bombs)
// however, every 10th time it is called, we just return something else. // however, every 10th time it is called, we just return something else.
var vals = [...]valueType{valueTypeArray, valueTypeMap} var vals = [...]valueType{valueTypeArray, valueTypeMap}
// ------------ TAKE ------------ // ------------ TAKE ------------
@ -154,8 +150,7 @@ func (h *noopDrv) ContainerType() (vt valueType) {
// } // }
// return valueTypeUnset // return valueTypeUnset
// TODO: may need to tweak this so it works. // TODO: may need to tweak this so it works.
// if h.ct == valueTypeMap && vt == valueTypeArray || // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
// h.ct == valueTypeArray && vt == valueTypeMap {
// h.cb = !h.cb // h.cb = !h.cb
// h.ct = vt // h.ct = vt
// return h.cb // return h.cb

View File

@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -11,143 +11,124 @@ import (
"sync" "sync"
) )
// // rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode.
// //
// // Some codecs like json need to put a space after each encoded value, to serve as a
// // delimiter for things like numbers (else json codec will continue reading till EOF).
// type rpcEncodeTerminator interface {
// rpcEncodeTerminate() []byte
// }
// Rpc provides a rpc Server or Client Codec for rpc communication. // Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface { type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
} }
// RPCOptions holds options specific to rpc functionality // RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
type RPCOptions struct { // used by the rpc connection. It accommodates use-cases where the connection
// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. // should be used by rpc and non-rpc functions, e.g. streaming a file after
// // sending an rpc response.
// Set RPCNoBuffer=true to turn buffering off. type RpcCodecBuffered interface {
// Buffering can still be done if buffered connections are passed in, or BufferedReader() *bufio.Reader
// buffering is configured on the handle. BufferedWriter() *bufio.Writer
RPCNoBuffer bool
} }
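A short sketch of opting out of that internal buffering on a handle, as the option above describes (relying on the connection's own buffering instead):

var h codec.JsonHandle
h.TermWhitespace = true // the rpc codec requires this for json handles
h.RPCNoBuffer = true    // skip the rpc module's own bufio wrapping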
// -------------------------------------
// rpcCodec defines the struct members and common methods. // rpcCodec defines the struct members and common methods.
type rpcCodec struct { type rpcCodec struct {
c io.Closer rwc io.ReadWriteCloser
r io.Reader
w io.Writer
f ioFlusher
dec *Decoder dec *Decoder
enc *Encoder enc *Encoder
// bw *bufio.Writer bw *bufio.Writer
// br *bufio.Reader br *bufio.Reader
mu sync.Mutex mu sync.Mutex
h Handle h Handle
cls bool cls bool
clsmu sync.RWMutex clsmu sync.RWMutex
clsErr error
} }
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
// return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) bw := bufio.NewWriter(conn)
return newRPCCodec2(conn, conn, conn, h) br := bufio.NewReader(conn)
}
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
// defensive: ensure that jsonH has TermWhitespace turned on. // defensive: ensure that jsonH has TermWhitespace turned on.
if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true")) panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
} }
// always ensure that we use a flusher, and always flush what was written to the connection.
// we lose nothing by using a buffered writer internally.
f, ok := w.(ioFlusher)
bh := h.getBasicHandle()
if !bh.RPCNoBuffer {
if bh.WriterBufferSize <= 0 {
if !ok {
bw := bufio.NewWriter(w)
f, w = bw, bw
}
}
if bh.ReaderBufferSize <= 0 {
if _, ok = w.(ioPeeker); !ok {
if _, ok = w.(ioBuffered); !ok {
br := bufio.NewReader(r)
r = br
}
}
}
}
return rpcCodec{ return rpcCodec{
c: c, rwc: conn,
w: w, bw: bw,
r: r, br: br,
f: f, enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
h: h, h: h,
enc: NewEncoder(w, h),
dec: NewDecoder(r, h),
} }
} }
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
if c.isClosed() { if c.isClosed() {
return c.clsErr return io.EOF
} }
err = c.enc.Encode(obj1) if err = c.enc.Encode(obj1); err != nil {
if err == nil { return
if writeObj2 { }
err = c.enc.Encode(obj2) // t, tOk := c.h.(rpcEncodeTerminator)
} // if tOk {
if err == nil && c.f != nil { // c.bw.Write(t.rpcEncodeTerminate())
err = c.f.Flush() // }
if writeObj2 {
if err = c.enc.Encode(obj2); err != nil {
return
} }
// if tOk {
// c.bw.Write(t.rpcEncodeTerminate())
// }
}
if doFlush {
return c.bw.Flush()
} }
return return
} }
func (c *rpcCodec) swallow(err *error) {
defer panicToErr(c.dec, err)
c.dec.swallow()
}
func (c *rpcCodec) read(obj interface{}) (err error) { func (c *rpcCodec) read(obj interface{}) (err error) {
if c.isClosed() { if c.isClosed() {
return c.clsErr return io.EOF
} }
//If nil is passed in, we should read and discard //If nil is passed in, we should still attempt to read content to nowhere.
if obj == nil { if obj == nil {
// var obj2 interface{} var obj2 interface{}
// return c.dec.Decode(&obj2) return c.dec.Decode(&obj2)
c.swallow(&err)
return
} }
return c.dec.Decode(obj) return c.dec.Decode(obj)
} }
func (c *rpcCodec) isClosed() (b bool) { func (c *rpcCodec) isClosed() bool {
if c.c != nil { c.clsmu.RLock()
c.clsmu.RLock() x := c.cls
b = c.cls c.clsmu.RUnlock()
c.clsmu.RUnlock() return x
}
return
} }
func (c *rpcCodec) Close() error { func (c *rpcCodec) Close() error {
if c.c == nil || c.isClosed() { if c.isClosed() {
return c.clsErr return io.EOF
} }
c.clsmu.Lock() c.clsmu.Lock()
c.cls = true c.cls = true
var fErr error
if c.f != nil {
fErr = c.f.Flush()
}
_ = fErr
c.clsErr = c.c.Close()
if c.clsErr == nil && fErr != nil {
c.clsErr = fErr
}
c.clsmu.Unlock() c.clsmu.Unlock()
return c.clsErr return c.rwc.Close()
} }
func (c *rpcCodec) ReadResponseBody(body interface{}) error { func (c *rpcCodec) ReadResponseBody(body interface{}) error {
@ -164,13 +145,13 @@ func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API // Must protect for concurrent access as per API
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock() defer c.mu.Unlock()
return c.write(r, body, true) return c.write(r, body, true, true)
} }
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock() defer c.mu.Unlock()
return c.write(r, body, true) return c.write(r, body, true, true)
} }
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
@ -192,36 +173,7 @@ func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
type goRpc struct{} type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package. // GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// // Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
//
// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
// This ensures we use an adequate buffer during reading and writing.
// If not configured, we will internally initialize and use a buffer during reads and writes.
// This can be turned off via the RPCNoBuffer option on the Handle.
// var handle codec.JsonHandle
// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
//
// Example 1: one way of configuring buffering explicitly:
// var handle codec.JsonHandle // codec handle
// handle.ReaderBufferSize = 1024
// handle.WriterBufferSize = 1024
// var conn io.ReadWriteCloser // connection got from a socket
// var serverCodec = GoRpc.ServerCodec(conn, handle)
// var clientCodec = GoRpc.ClientCodec(conn, handle)
//
// Example 2: you can also explicitly create a buffered connection yourself,
// and not worry about configuring the buffer sizes in the Handle.
// var handle codec.Handle // codec handle
// var conn io.ReadWriteCloser // connection got from a socket
// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser
// io.Closer
// *bufio.Reader
// *bufio.Writer
// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
// var serverCodec = GoRpc.ServerCodec(bufconn, handle)
// var clientCodec = GoRpc.ClientCodec(bufconn, handle)
//
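Tying that together, a minimal sketch of wiring these codecs into the standard net/rpc package; serveConn, callPeer, and "Service.Method" are illustrative names, imports of io, net/rpc, and this codec package are assumed, and error handling is mostly elided.

// serveConn answers rpc requests on one connection using msgpack framing.
func serveConn(conn io.ReadWriteCloser) {
	var mh codec.MsgpackHandle
	rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, &mh))
}

// callPeer issues a single call over an established connection.
func callPeer(conn io.ReadWriteCloser) error {
	var mh codec.MsgpackHandle
	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, &mh))
	defer client.Close()
	var reply string
	return client.Call("Service.Method", "ping", &reply)
}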
var GoRpc goRpc var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
@ -231,3 +183,5 @@ func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)} return &goRpcCodec{newRPCCodec(conn, h)}
} }
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered

View File

@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -6,7 +6,6 @@ package codec
import ( import (
"math" "math"
"reflect" "reflect"
"time"
) )
const ( const (
@ -21,8 +20,6 @@ const (
simpleVdPosInt = 8 simpleVdPosInt = 8
simpleVdNegInt = 12 simpleVdNegInt = 12
simpleVdTime = 24
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216 simpleVdString = 216
simpleVdByteArray = 224 simpleVdByteArray = 224
@ -33,15 +30,12 @@ const (
type simpleEncDriver struct { type simpleEncDriver struct {
noBuiltInTypes noBuiltInTypes
encDriverNoopContainerWriter
// encNoSeparator // encNoSeparator
e *Encoder e *Encoder
h *SimpleHandle h *SimpleHandle
w encWriter w encWriter
b [8]byte b [8]byte
// c containerState
encDriverTrackContainerWriter
// encDriverNoopContainerWriter
_ [2]uint64 // padding
} }
func (e *simpleEncDriver) EncodeNil() { func (e *simpleEncDriver) EncodeNil() {
@ -49,10 +43,6 @@ func (e *simpleEncDriver) EncodeNil() {
} }
func (e *simpleEncDriver) EncodeBool(b bool) { func (e *simpleEncDriver) EncodeBool(b bool) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b {
e.EncodeNil()
return
}
if b { if b {
e.w.writen1(simpleVdTrue) e.w.writen1(simpleVdTrue)
} else { } else {
@ -61,19 +51,11 @@ func (e *simpleEncDriver) EncodeBool(b bool) {
} }
func (e *simpleEncDriver) EncodeFloat32(f float32) { func (e *simpleEncDriver) EncodeFloat32(f float32) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.w.writen1(simpleVdFloat32) e.w.writen1(simpleVdFloat32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
} }
func (e *simpleEncDriver) EncodeFloat64(f float64) { func (e *simpleEncDriver) EncodeFloat64(f float64) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.w.writen1(simpleVdFloat64) e.w.writen1(simpleVdFloat64)
bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
} }
@ -91,10 +73,6 @@ func (e *simpleEncDriver) EncodeUint(v uint64) {
} }
func (e *simpleEncDriver) encUint(v uint64, bd uint8) { func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 {
e.EncodeNil()
return
}
if v <= math.MaxUint8 { if v <= math.MaxUint8 {
e.w.writen2(bd, uint8(v)) e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 { } else if v <= math.MaxUint16 {
@ -148,51 +126,24 @@ func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
} }
func (e *simpleEncDriver) WriteArrayStart(length int) { func (e *simpleEncDriver) WriteArrayStart(length int) {
e.c = containerArrayStart
e.encLen(simpleVdArray, length) e.encLen(simpleVdArray, length)
} }
func (e *simpleEncDriver) WriteMapStart(length int) { func (e *simpleEncDriver) WriteMapStart(length int) {
e.c = containerMapStart
e.encLen(simpleVdMap, length) e.encLen(simpleVdMap, length)
} }
func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" {
e.EncodeNil()
return
}
e.encLen(simpleVdString, len(v)) e.encLen(simpleVdString, len(v))
e.w.writestr(v) e.w.writestr(v)
} }
// func (e *simpleEncDriver) EncodeSymbol(v string) { func (e *simpleEncDriver) EncodeSymbol(v string) {
// e.EncodeString(cUTF8, v) e.EncodeString(c_UTF8, v)
// }
func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
if v == nil {
e.EncodeNil()
return
}
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
} }
func (e *simpleEncDriver) EncodeTime(t time.Time) { func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { e.encLen(simpleVdByteArray, len(v))
if t.IsZero() {
e.EncodeNil()
return
}
v, err := t.MarshalBinary()
if err != nil {
e.e.errorv(err)
return
}
// time.Time marshalbinary takes about 14 bytes.
e.w.writen2(simpleVdTime, uint8(len(v)))
e.w.writeb(v) e.w.writeb(v)
} }
@ -204,13 +155,11 @@ type simpleDecDriver struct {
r decReader r decReader
bdRead bool bdRead bool
bd byte bd byte
br bool // a bytes reader? br bool // bytes reader
c containerState b [scratchByteArrayLen]byte
// b [scratchByteArrayLen]byte
noBuiltInTypes noBuiltInTypes
// noStreamingCodec // noStreamingCodec
decDriverNoopContainerReader decDriverNoopContainerReader
_ [3]uint64 // padding
} }
func (d *simpleDecDriver) readNextBd() { func (d *simpleDecDriver) readNextBd() {
@ -229,27 +178,23 @@ func (d *simpleDecDriver) ContainerType() (vt valueType) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
switch d.bd { if d.bd == simpleVdNil {
case simpleVdNil:
return valueTypeNil return valueTypeNil
case simpleVdByteArray, simpleVdByteArray + 1, } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
return valueTypeBytes return valueTypeBytes
case simpleVdString, simpleVdString + 1, } else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
return valueTypeString return valueTypeString
case simpleVdArray, simpleVdArray + 1, } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
return valueTypeArray return valueTypeArray
case simpleVdMap, simpleVdMap + 1, } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
return valueTypeMap return valueTypeMap
// case simpleVdTime: } else {
// return valueTypeTime // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -290,7 +235,7 @@ func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
ui = uint64(bigen.Uint64(d.r.readx(8))) ui = uint64(bigen.Uint64(d.r.readx(8)))
neg = true neg = true
default: default:
d.d.errorf("Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
return return
} }
// don't do this check, because callers may only want the unsigned value. // don't do this check, because callers may only want the unsigned value.
@ -301,27 +246,39 @@ func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
return return
} }
func (d *simpleDecDriver) DecodeInt64() (i int64) { func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger() ui, neg := d.decCheckInteger()
i = chkOvf.SignedIntV(ui) i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg { if neg {
i = -i i = -i
} }
d.bdRead = false if chkOvf.Int(i, bitsize) {
return d.d.errorf("simple: overflow integer: %v", i)
}
func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return return
} }
d.bdRead = false d.bdRead = false
return return
} }
func (d *simpleDecDriver) DecodeFloat64() (f float64) { func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -331,12 +288,16 @@ func (d *simpleDecDriver) DecodeFloat64() (f float64) {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else { } else {
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
f = float64(d.DecodeInt64()) f = float64(d.DecodeInt(64))
} else { } else {
d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
return return
} }
} }
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
}
d.bdRead = false d.bdRead = false
return return
} }
@ -362,7 +323,6 @@ func (d *simpleDecDriver) ReadMapStart() (length int) {
d.readNextBd() d.readNextBd()
} }
d.bdRead = false d.bdRead = false
d.c = containerMapStart
return d.decLen() return d.decLen()
} }
@ -371,30 +331,9 @@ func (d *simpleDecDriver) ReadArrayStart() (length int) {
d.readNextBd() d.readNextBd()
} }
d.bdRead = false d.bdRead = false
d.c = containerArrayStart
return d.decLen() return d.decLen()
} }
func (d *simpleDecDriver) ReadArrayElem() {
d.c = containerArrayElem
}
func (d *simpleDecDriver) ReadArrayEnd() {
d.c = containerArrayEnd
}
func (d *simpleDecDriver) ReadMapElemKey() {
d.c = containerMapKey
}
func (d *simpleDecDriver) ReadMapElemValue() {
d.c = containerMapValue
}
func (d *simpleDecDriver) ReadMapEnd() {
d.c = containerMapEnd
}
func (d *simpleDecDriver) decLen() int { func (d *simpleDecDriver) decLen() int {
switch d.bd % 8 { switch d.bd % 8 {
case 0: case 0:
@ -406,14 +345,14 @@ func (d *simpleDecDriver) decLen() int {
case 3: case 3:
ui := uint64(bigen.Uint32(d.r.readx(4))) ui := uint64(bigen.Uint32(d.r.readx(4)))
if chkOvf.Uint(ui, intBitsize) { if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui) d.d.errorf("simple: overflow integer: %v", ui)
return 0 return 0
} }
return int(ui) return int(ui)
case 4: case 4:
ui := bigen.Uint64(d.r.readx(8)) ui := bigen.Uint64(d.r.readx(8))
if chkOvf.Uint(ui, intBitsize) { if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui) d.d.errorf("simple: overflow integer: %v", ui)
return 0 return 0
} }
return int(ui) return int(ui)
@ -423,11 +362,11 @@ func (d *simpleDecDriver) decLen() int {
} }
func (d *simpleDecDriver) DecodeString() (s string) { func (d *simpleDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.d.b[:], true)) return string(d.DecodeBytes(d.b[:], true))
} }
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true) return d.DecodeBytes(d.b[:], true)
} }
func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
@ -438,48 +377,18 @@ func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
d.bdRead = false d.bdRead = false
return return
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
if len(bs) == 0 && zerocopy {
bs = d.d.b[:]
}
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
clen := d.decLen() clen := d.decLen()
d.bdRead = false d.bdRead = false
if zerocopy { if zerocopy {
if d.br { if d.br {
return d.r.readx(clen) return d.r.readx(clen)
} else if len(bs) == 0 { } else if len(bs) == 0 {
bs = d.d.b[:] bs = d.b[:]
} }
} }
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
} }
func (d *simpleDecDriver) DecodeTime() (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return
}
if d.bd != simpleVdTime {
d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
return
}
d.bdRead = false
clen := int(d.r.readn1())
b := d.r.readx(clen)
if err := (&t).UnmarshalBinary(b); err != nil {
d.d.errorv(err)
}
return
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff { if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
@ -510,11 +419,10 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
return return
} }
xbs = d.r.readx(l) xbs = d.r.readx(l)
case simpleVdByteArray, simpleVdByteArray + 1, case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil, true) xbs = d.DecodeBytes(nil, true)
default: default:
d.d.errorf("Invalid descriptor - expecting extensions/bytearray, got: 0x%x", d.bd) d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
return return
} }
d.bdRead = false d.bdRead = false
@ -541,29 +449,24 @@ func (d *simpleDecDriver) DecodeNaked() {
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
if d.h.SignedInteger { if d.h.SignedInteger {
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt64() n.i = d.DecodeInt(64)
} else { } else {
n.v = valueTypeUint n.v = valueTypeUint
n.u = d.DecodeUint64() n.u = d.DecodeUint(64)
} }
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt64() n.i = d.DecodeInt(64)
case simpleVdFloat32: case simpleVdFloat32:
n.v = valueTypeFloat n.v = valueTypeFloat
n.f = d.DecodeFloat64() n.f = d.DecodeFloat(true)
case simpleVdFloat64: case simpleVdFloat64:
n.v = valueTypeFloat n.v = valueTypeFloat
n.f = d.DecodeFloat64() n.f = d.DecodeFloat(false)
case simpleVdTime: case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeTime
n.t = d.DecodeTime()
case simpleVdString, simpleVdString + 1,
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeString n.v = valueTypeString
n.s = d.DecodeString() n.s = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1, case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false) n.l = d.DecodeBytes(nil, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
@ -571,8 +474,7 @@ func (d *simpleDecDriver) DecodeNaked() {
l := d.decLen() l := d.decLen()
n.u = uint64(d.r.readn1()) n.u = uint64(d.r.readn1())
n.l = d.r.readx(l) n.l = d.r.readx(l)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
simpleVdArray + 3, simpleVdArray + 4:
n.v = valueTypeArray n.v = valueTypeArray
decodeFurther = true decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
@ -598,7 +500,7 @@ func (d *simpleDecDriver) DecodeNaked() {
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) // - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions) // - Lenght of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes. // are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded. // Zero-length containers have no length encoded.
// For others, the number of bytes is given by pow(2, bd%3) // For others, the number of bytes is given by pow(2, bd%3)
@ -606,29 +508,18 @@ func (d *simpleDecDriver) DecodeNaked() {
// - arrays are encoded as [bd] [length] [value]... // - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]... // - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]... // - strings/bytearrays are encoded as [bd] [length] [byte]...
// - time.Time are encoded as [bd] [length] [byte]...
// //
// The full spec will be published soon. // The full spec will be published soon.
type SimpleHandle struct { type SimpleHandle struct {
BasicHandle BasicHandle
binaryEncodingType binaryEncodingType
noElemSeparators noElemSeparators
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
EncZeroValuesAsNil bool
_ [1]uint64 // padding
} }
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
// SetBytesExt sets an extension
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) return h.SetExt(rt, tag, &setExtWrapper{b: ext})
} }
func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX
func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
return &simpleEncDriver{e: e, w: e.w, h: h} return &simpleEncDriver{e: e, w: e.w, h: h}
} }
@ -638,12 +529,10 @@ func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
} }
func (e *simpleEncDriver) reset() { func (e *simpleEncDriver) reset() {
e.c = 0
e.w = e.e.w e.w = e.e.w
} }
func (d *simpleDecDriver) reset() { func (d *simpleDecDriver) reset() {
d.c = 0
d.r, d.br = d.d.r, d.d.bytes d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false d.bd, d.bdRead = 0, false
} }
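For orientation, here is a minimal round trip through the `simple` format described in the comments above, using the public codec API (a sketch assuming the usual `github.com/ugorji/go/codec` import path; it is not part of this diff):

```go
// Minimal sketch: encode and decode a value with the "simple" format.
// Assumes the github.com/ugorji/go/codec import path; error handling is
// reduced to panics for brevity.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var sh codec.SimpleHandle

	in := map[string]int{"a": 1, "b": 2}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &sh).Encode(in); err != nil {
		panic(err)
	}

	var out map[string]int
	if err := codec.NewDecoderBytes(buf, &sh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
```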

220
vendor/github.com/ugorji/go/codec/time.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"fmt"
"time"
)
var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
type timeExt struct{}
func (x timeExt) WriteExt(v interface{}) (bs []byte) {
switch v2 := v.(type) {
case time.Time:
bs = encodeTime(v2)
case *time.Time:
bs = encodeTime(*v2)
default:
panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
}
return
}
func (x timeExt) ReadExt(v interface{}, bs []byte) {
tt, err := decodeTime(bs)
if err != nil {
panic(err)
}
*(v.(*time.Time)) = tt
}
func (x timeExt) ConvertExt(v interface{}) interface{} {
return x.WriteExt(v)
}
func (x timeExt) UpdateExt(v interface{}, src interface{}) {
x.ReadExt(v, src.([]byte))
}
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// 2-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
func encodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
// DecodeTime decodes a []byte into a time.Time.
func decodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
// However, we need name here, so it can be shown when time is printed.
// Zone name is in form: UTC-08:00.
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
//tzname[3] = '-' (TODO: verify. this works here)
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
// func timeLocUTCName(tzint int16) string {
// if tzint == 0 {
// return "UTC"
// }
// var tzname = []byte("UTC+00:00")
// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
// var tzhr, tzmin int16
// if tzint < 0 {
// tzname[3] = '-' // (TODO: verify. this works here)
// tzhr, tzmin = -tzint/60, (-tzint)%60
// } else {
// tzhr, tzmin = tzint/60, tzint%60
// }
// tzname[4] = timeDigits[tzhr/10]
// tzname[5] = timeDigits[tzhr%10]
// tzname[7] = timeDigits[tzmin/10]
// tzname[8] = timeDigits[tzmin%10]
// return string(tzname)
// //return time.FixedZone(string(tzname), int(tzint)*60)
// }
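To make the descriptor layout above concrete, here is a small sketch that unpacks the `A B C DDD EE` fields of a descriptor byte exactly as documented. It re-derives the rules from the comment rather than calling into the codec package, and the sample bytes are worked out from the spec text, not captured from a run:

```go
// Sketch: interpret the timestamp descriptor byte per the A B C DDD EE
// layout documented above. Illustration of the spec only.
package main

import "fmt"

func describe(bd byte) {
	hasSecs := bd&0x80 != 0  // A: secs component encoded?
	hasNsecs := bd&0x40 != 0 // B: nsecs component encoded?
	hasTZ := bd&0x20 != 0    // C: tz component encoded?

	secsBytes, nsecsBytes := 0, 0
	if hasSecs {
		secsBytes = int((bd>>2)&0x07) + 1 // DDD+1 bytes for secs
	}
	if hasNsecs {
		nsecsBytes = int(bd&0x03) + 1 // EE+1 bytes for nsecs
	}
	fmt.Printf("secs: %d byte(s), nsecs: %d byte(s), tz present: %v\n",
		secsBytes, nsecsBytes, hasTZ)
}

func main() {
	// Per the rules above, a UTC instant 60 seconds after the epoch with
	// zero nanoseconds would be written as the two bytes 0x80 0x3c:
	// descriptor 0x80 (secs in 1 byte, no nsecs, no tz), then secs = 60.
	describe(0x80) // secs: 1 byte(s), nsecs: 0 byte(s), tz present: false
}
```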

View File

@ -1,6 +1,3 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build ignore // +build ignore
package codec package codec
@ -27,6 +24,7 @@ It is a replacement, based on the simplicity and performance of codec.
Look at it like JAXB for Go. Look at it like JAXB for Go.
Challenges: Challenges:
- Need to output XML preamble, with all namespaces at the right location in the output. - Need to output XML preamble, with all namespaces at the right location in the output.
- Each "end" block is dynamic, so we need to maintain a context-aware stack - Each "end" block is dynamic, so we need to maintain a context-aware stack
- How to decide when to use an attribute VS an element - How to decide when to use an attribute VS an element
@ -36,26 +34,24 @@ Challenges:
Extend the struct tag. See representative example: Extend the struct tag. See representative example:
type X struct { type X struct {
ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"` ID uint8 codec:"xid|http://ugorji.net/x-namespace id,omitempty,toarray,attr,cdata"
// format: [namespace-uri ][namespace-prefix ]local-name, ...
} }
Based on this, we encode Based on this, we encode
- fields as elements, BUT - fields as elements, BUT encode as attributes if struct tag contains ",attr".
encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
In this mode, we only encode as attribute if ",attr" is found, and only encode as CDATA
if ",cdata" is found in the struct tag.
To handle namespaces: To handle namespaces:
- XMLHandle is denoted as being namespace-aware. - XMLHandle is denoted as being namespace-aware.
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
- *Encoder and *Decoder know whether the Handle "prefers" namespaces. - *Encoder and *Decoder know whether the Handle "prefers" namespaces.
- add *Encoder.getEncName(*structFieldInfo). - add *Encoder.getEncName(*structFieldInfo).
No one calls *structFieldInfo.indexForEncName directly anymore No one calls *structFieldInfo.indexForEncName directly anymore
- OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
indexForEncName takes a parameter of the form namespace:local-name OR local-name
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
by being a method on *Decoder, or maybe a method on the Handle itself. No one accesses .encName anymore except in
No one accesses .encName anymore
- let encode.go and decode.go use these (for consistency) - let encode.go and decode.go use these (for consistency)
- only problem exists for gen.go, where we create a big switch on encName. - only problem exists for gen.go, where we create a big switch on encName.
Now, we also have to add a switch on strings.endsWith(kName, encNsName) Now, we also have to add a switch on strings.endsWith(kName, encNsName)
@ -66,14 +62,13 @@ To handle namespaces:
default { default {
switch { switch {
case !nsAware: panic(...) case !nsAware: panic(...)
case strings.endsWith(":abc"): x.abc() case strings.endsWith("nsabc"): x.abc()
case strings.endsWith(":def"): x.def()
default: panic(...) default: panic(...)
} }
} }
} }
The structure below accommodates this: The structure below accomodates this:
type typeInfo struct { type typeInfo struct {
sfi []*structFieldInfo // sorted by encName sfi []*structFieldInfo // sorted by encName
@ -93,10 +88,7 @@ indexForEncName is now an internal helper function that takes a sorted array
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) (one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
There will be a separate parser from the builder. There will be a separate parser from the builder.
The parser will have a method: next() xmlToken method. It has lookahead support, The parser will have a method: next() xmlToken method.
so you can pop multiple tokens, make a determination, and push them back in the order popped.
This will be needed to determine whether we are "nakedly" decoding a container or not.
The stack will be implemented using a slice and push/pop happens at the [0] element.
xmlToken has fields: xmlToken has fields:
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
@ -140,7 +132,7 @@ At decode time, a structure containing the following is kept
- all internal entities (<>&"' and others written in the document) - all internal entities (<>&"' and others written in the document)
When decode starts, it parses XML namespace declarations and creates a map in the When decode starts, it parses XML namespace declarations and creates a map in the
xmlDecDriver. While parsing, that map continuously gets updated. xmlDecDriver. While parsing, that map continously gets updated.
The only problem happens when a namespace declaration happens on the node that it defines. The only problem happens when a namespace declaration happens on the node that it defines.
e.g. <hn:name xmlns:hn="http://www.ugorji.net" > e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
To handle this, each Element must be fully parsed at a time, To handle this, each Element must be fully parsed at a time,
@ -152,7 +144,7 @@ xmlns is a special attribute name.
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
Number, bool, null, mapKey, etc can all be decoded from any xmlToken. Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
This accommodates map[int]string for example. This accomodates map[int]string for example.
It should be possible to create a schema from the types, It should be possible to create a schema from the types,
or vice versa (generate types from schema with appropriate tags). or vice versa (generate types from schema with appropriate tags).
@ -186,8 +178,8 @@ An XML document is a name, a map of attributes and a list of children.
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
We have to "DecodeNaked" into something that resembles XML data. We have to "DecodeNaked" into something that resembles XML data.
To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types: To support DecodeNaked (decode into nil interface{}) we have to define some "supporting" types:
type Name struct { // Preferred. Less allocations due to conversions. type Name struct { // Prefered. Less allocations due to conversions.
Local string Local string
Space string Space string
} }
@ -198,8 +190,6 @@ To support DecodeNaked (decode into nil interface{}), we have to define some "su
} }
Only two "supporting" types are exposed for XML: Name and Element. Only two "supporting" types are exposed for XML: Name and Element.
// ------------------
We considered 'type Name string' where Name is like "Space Local" (space-separated). We considered 'type Name string' where Name is like "Space Local" (space-separated).
We decided against it, because each creation of a name would lead to We decided against it, because each creation of a name would lead to
double allocation (first convert []byte to string, then concatenate them into a string). double allocation (first convert []byte to string, then concatenate them into a string).
@ -225,16 +215,16 @@ intelligent accessor methods to extract information and for performance.
} }
func (x *Element) child(i) interface{} // returns string or *Element func (x *Element) child(i) interface{} // returns string or *Element
// ------------------ Per XML spec and our default handling, white space is insignificant between elements,
specifically between parent-child or siblings. White space occuring alone between start
Per XML spec and our default handling, white space is always treated as and end element IS significant. However, if xml:space='preserve', then we 'preserve'
insignificant between elements, except in a text node. The xml:space='preserve' all whitespace. This is more critical when doing a DecodeNaked, but MAY not be as critical
attribute is ignored. when decoding into a typed value.
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** **Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
**So treat them as just "directives" that should be interpreted to mean something**. **So treat them as just "directives" that should be interpreted to mean something**.
On encoding, we support indenting aka prettifying markup in the same way we support it for json. On encoding, we don't add any prettifying markup (indenting, etc).
A document or element can only be encoded/decoded from/to a struct. In this mode: A document or element can only be encoded/decoded from/to a struct. In this mode:
- struct name maps to element name (or tag-info from _struct field) - struct name maps to element name (or tag-info from _struct field)
@ -268,14 +258,15 @@ the struct tag signifying it should be attr, then all its fields are encoded as
e.g. e.g.
type X struct { type X struct {
M map[string]int `codec:"m,attr"` // encode keys as attributes named M map[string]int `codec:"m,attr"` // encode as attributes
} }
Question: Question:
- if encoding a map, what if map keys have spaces in them??? - if encoding a map, what if map keys have spaces in them???
Then they cannot be attributes or child elements. Error. Then they cannot be attributes or child elements. Error.
Options to consider adding later: Misc:
- For attribute values, normalize by trimming beginning and ending white space, - For attribute values, normalize by trimming beginning and ending white space,
and converting every white space sequence to a single space. and converting every white space sequence to a single space.
- ATTLIST restrictions are enforced. - ATTLIST restrictions are enforced.
@ -293,8 +284,6 @@ Options to consider adding later:
CheckName bool CheckName bool
} }
Misc:
ROADMAP (1 weeks): ROADMAP (1 weeks):
- build encoder (1 day) - build encoder (1 day)
- build decoder (based off xmlParser) (1 day) - build decoder (based off xmlParser) (1 day)
@ -303,78 +292,7 @@ ROADMAP (1 weeks):
- integrate and TEST (1 days) - integrate and TEST (1 days)
- write article and post it (1 day) - write article and post it (1 day)
// ---------- MORE NOTES FROM 2017-11-30 ------------
when parsing
- parse the attributes first
- then parse the nodes
basically:
- if encoding a field: we use the field name for the wrapper
- if encoding a non-field, then just use the element type name
map[string]string ==> <map><key>abc</key><value>val</value></map>... or
<map key="abc">val</map>... OR
<key1>val1</key1><key2>val2</key2>... <- PREFERED
[]string ==> <string>v1</string><string>v2</string>...
string v1 ==> <string>v1</string>
bool true ==> <bool>true</bool>
float 1.0 ==> <float>1.0</float>
...
F1 map[string]string ==> <F1><key>abc</key><value>val</value></F1>... OR
<F1 key="abc">val</F1>... OR
<F1><abc>val</abc>...</F1> <- PREFERED
F2 []string ==> <F2>v1</F2><F2>v2</F2>...
F3 bool ==> <F3>true</F3>
...
- a scalar is encoded as:
(value) of type T ==> <T><value/></T>
(value) of field F ==> <F><value/></F>
- A kv-pair is encoded as:
(key,value) ==> <map><key><value/></key></map> OR <map key="value">
(key,value) of field F ==> <F><key><value/></key></F> OR <F key="value">
- A map or struct is just a list of kv-pairs
- A list is encoded as sequences of same node e.g.
<F1 key1="value11">
<F1 key2="value12">
<F2>value21</F2>
<F2>value22</F2>
- we may have to singularize the field name, when entering into xml,
and pluralize them when encoding.
- bi-directional encode->decode->encode is not a MUST.
even encoding/xml cannot decode correctly what was encoded:
see https://play.golang.org/p/224V_nyhMS
func main() {
fmt.Println("Hello, playground")
v := []interface{}{"hello", 1, true, nil, time.Now()}
s, err := xml.Marshal(v)
fmt.Printf("err: %v, \ns: %s\n", err, s)
var v2 []interface{}
err = xml.Unmarshal(s, &v2)
fmt.Printf("err: %v, \nv2: %v\n", err, v2)
type T struct {
V []interface{}
}
v3 := T{V: v}
s, err = xml.Marshal(v3)
fmt.Printf("err: %v, \ns: %s\n", err, s)
var v4 T
err = xml.Unmarshal(s, &v4)
fmt.Printf("err: %v, \nv4: %v\n", err, v4)
}
Output:
err: <nil>,
s: <string>hello</string><int>1</int><bool>true</bool><Time>2009-11-10T23:00:00Z</Time>
err: <nil>,
v2: [<nil>]
err: <nil>,
s: <T><V>hello</V><V>1</V><V>true</V><V>2009-11-10T23:00:00Z</V></T>
err: <nil>,
v4: {[<nil> <nil> <nil> <nil>]}
-
*/ */
// ----------- PARSER ------------------- // ----------- PARSER -------------------
@ -501,7 +419,7 @@ func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
} }
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) return h.SetExt(rt, tag, &setExtWrapper{i: ext})
} }
var _ decDriver = (*xmlDecDriver)(nil) var _ decDriver = (*xmlDecDriver)(nil)
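The notes above are design sketches only (the file is excluded from builds via `+build ignore`), but the proposed struct-tag format can be illustrated. Below is a hypothetical struct following the `[namespace-uri ][namespace-prefix ]local-name, options` convention described there; the tags would have no effect with the handles that actually ship, and the namespace URI is made up for illustration:

```go
// Hypothetical illustration of the struct-tag format proposed in the XML
// design notes above; no shipped handle interprets these options yet.
package main

import "fmt"

// Person follows the proposed "[namespace-uri ][namespace-prefix ]local-name, options"
// tag convention from the notes. The namespace URI is invented for this sketch.
type Person struct {
	ID   uint8  `codec:"http://example.net/ns x id,omitempty,attr"`
	Bio  string `codec:"bio,cdata"` // proposed: emit as CDATA, not escaped text
	Name string `codec:"name"`      // plain child element
}

func main() {
	fmt.Printf("%+v\n", Person{ID: 1, Name: "a"})
}
```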

23
vendor/github.com/ugorji/go/codec/z.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import "sort"
// TODO: this is brittle, as it depends on z.go's init() being called last.
// The current build tools all honor that files are passed in lexical order.
// However, we should consider using an init_channel,
// that each person doing init will write to.
func init() {
if !useLookupRecognizedTypes {
return
}
sort.Sort(uintptrSlice(recognizedRtids))
sort.Sort(uintptrSlice(recognizedRtidPtrs))
recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs))
copy(recognizedRtidOrPtrs, recognizedRtids)
copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs)
sort.Sort(uintptrSlice(recognizedRtidOrPtrs))
}

47
vendor/github.com/ugorji/go/msgpack.org.md generated vendored Normal file
View File

@ -0,0 +1,47 @@
**MessagePack and [Binc](http://github.com/ugorji/binc) Codec for [Go](http://golang.org) Language.**
*A High Performance, Feature-Rich, Idiomatic encode/decode and rpc library*.
To install:
go get github.com/ugorji/go/codec
Source: [http://github.com/ugorji/go]
Online documentation: [http://godoc.org/github.com/ugorji/go/codec]
Typical usage:
```go
// create and use decoder/encoder
var (
v interface{} // value to decode/encode into
r io.Reader
w io.Writer
b []byte
mh codec.MsgpackHandle
)
dec = codec.NewDecoder(r, &mh)
dec = codec.NewDecoderBytes(b, &mh)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, &mh)
enc = codec.NewEncoderBytes(&b, &mh)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
```
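
The snippet above follows the upstream README and elides declarations and error handling. A self-contained version of the encode/decode portion (a sketch assuming the usual import path; the RPC half is omitted):

```go
// Self-contained sketch of the "typical usage" encode/decode flow above.
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var mh codec.MsgpackHandle

	// Encode into a byte slice.
	var b []byte
	if err := codec.NewEncoderBytes(&b, &mh).Encode(map[string]int{"n": 42}); err != nil {
		panic(err)
	}

	// Decode from an io.Reader (here a bytes.Reader over the same data).
	var v interface{}
	if err := codec.NewDecoder(bytes.NewReader(b), &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v)
}
```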