diff --git a/apis/apps/v1beta2/zz_generated_deepcopy.go b/apis/apps/v1beta2/zz_generated_deepcopy.go
index feedb944..c9e190b2 100644
--- a/apis/apps/v1beta2/zz_generated_deepcopy.go
+++ b/apis/apps/v1beta2/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1beta2
 
 import (
-	apps_v1beta2 "k8s.io/api/apps/v1beta2"
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]apps_v1beta2.DaemonSet, len(*in))
+		*out = make([]appsv1beta2.DaemonSet, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -45,7 +45,7 @@ func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]apps_v1beta2.Deployment, len(*in))
+		*out = make([]appsv1beta2.Deployment, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -78,7 +78,7 @@ func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]apps_v1beta2.ReplicaSet, len(*in))
+		*out = make([]appsv1beta2.ReplicaSet, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -111,7 +111,7 @@ func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]apps_v1beta2.StatefulSet, len(*in))
+		*out = make([]appsv1beta2.StatefulSet, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/apps/v1beta2/zz_generated_k8s_client.go b/apis/apps/v1beta2/zz_generated_k8s_client.go
index aebd5b79..bff25c3e 100644
--- a/apis/apps/v1beta2/zz_generated_k8s_client.go
+++ b/apis/apps/v1beta2/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -32,6 +34,19 @@ type Client struct {
 	replicaSetControllers map[string]ReplicaSetController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
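Reviewer note: every generated zz_generated_k8s_client.go in this changeset gains the same contextKeyType/Factory/From trio, so the pattern is annotated once here. A minimal caller-side sketch follows; the import path alias and the availability of a populated rest.Config are assumptions for illustration, not part of this diff:

	package main

	import (
		"context"

		appsv1beta2 "github.com/rancher/types/apis/apps/v1beta2" // assumed import path
		"k8s.io/client-go/rest"
	)

	func example(cfg rest.Config) error {
		ctx := context.Background()

		// Factory builds the typed client once and registers it on the context.
		ctx, starter, err := appsv1beta2.Factory(ctx, cfg)
		if err != nil {
			return err
		}
		_ = starter // a controller.Starter; callers typically collect these and start them together

		// From retrieves the client registered above. Note the unchecked type
		// assertion in the generated code: calling From on a context that never
		// went through Factory panics rather than returning nil.
		client := appsv1beta2.From(ctx)
		_ = client.RESTClient()
		return nil
	}
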
diff --git a/apis/batch/v1/zz_generated_deepcopy.go b/apis/batch/v1/zz_generated_deepcopy.go
index 03136de3..7d0d3c14 100644
--- a/apis/batch/v1/zz_generated_deepcopy.go
+++ b/apis/batch/v1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1
 
 import (
-	batch_v1 "k8s.io/api/batch/v1"
+	batchv1 "k8s.io/api/batch/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *JobList) DeepCopyInto(out *JobList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]batch_v1.Job, len(*in))
+		*out = make([]batchv1.Job, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/batch/v1/zz_generated_k8s_client.go b/apis/batch/v1/zz_generated_k8s_client.go
index adb21a7a..46d2eed0 100644
--- a/apis/batch/v1/zz_generated_k8s_client.go
+++ b/apis/batch/v1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -26,6 +28,19 @@ type Client struct {
 	jobControllers map[string]JobController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/batch/v1beta1/zz_generated_deepcopy.go b/apis/batch/v1beta1/zz_generated_deepcopy.go
index 06268a0e..d4217014 100644
--- a/apis/batch/v1beta1/zz_generated_deepcopy.go
+++ b/apis/batch/v1beta1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1beta1
 
 import (
-	batch_v1beta1 "k8s.io/api/batch/v1beta1"
+	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *CronJobList) DeepCopyInto(out *CronJobList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]batch_v1beta1.CronJob, len(*in))
+		*out = make([]batchv1beta1.CronJob, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/batch/v1beta1/zz_generated_k8s_client.go b/apis/batch/v1beta1/zz_generated_k8s_client.go
index 29646019..f65bac59 100644
--- a/apis/batch/v1beta1/zz_generated_k8s_client.go
+++ b/apis/batch/v1beta1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -26,6 +28,19 @@ type Client struct {
 	cronJobControllers map[string]CronJobController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/core/v1/zz_generated_deepcopy.go b/apis/core/v1/zz_generated_deepcopy.go
index 5a97f966..73d15e6a 100644
--- a/apis/core/v1/zz_generated_deepcopy.go
+++ b/apis/core/v1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1
 
 import (
-	core_v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.ComponentStatus, len(*in))
+		*out = make([]corev1.ComponentStatus, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -45,7 +45,7 @@ func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.ConfigMap, len(*in))
+		*out = make([]corev1.ConfigMap, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -78,7 +78,7 @@ func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Endpoints, len(*in))
+		*out = make([]corev1.Endpoints, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -111,7 +111,7 @@ func (in *EventList) DeepCopyInto(out *EventList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Event, len(*in))
+		*out = make([]corev1.Event, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -144,7 +144,7 @@ func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Namespace, len(*in))
+		*out = make([]corev1.Namespace, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -177,7 +177,7 @@ func (in *NodeList) DeepCopyInto(out *NodeList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Node, len(*in))
+		*out = make([]corev1.Node, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -210,7 +210,7 @@ func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.PersistentVolumeClaim, len(*in))
+		*out = make([]corev1.PersistentVolumeClaim, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -243,7 +243,7 @@ func (in *PodList) DeepCopyInto(out *PodList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Pod, len(*in))
+		*out = make([]corev1.Pod, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -276,7 +276,7 @@ func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.ReplicationController, len(*in))
+		*out = make([]corev1.ReplicationController, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -309,7 +309,7 @@ func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.ResourceQuota, len(*in))
+		*out = make([]corev1.ResourceQuota, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -342,7 +342,7 @@ func (in *SecretList) DeepCopyInto(out *SecretList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Secret, len(*in))
+		*out = make([]corev1.Secret, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -375,7 +375,7 @@ func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.ServiceAccount, len(*in))
+		*out = make([]corev1.ServiceAccount, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -408,7 +408,7 @@ func (in *ServiceList) DeepCopyInto(out *ServiceList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]core_v1.Service, len(*in))
+		*out = make([]corev1.Service, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/core/v1/zz_generated_k8s_client.go b/apis/core/v1/zz_generated_k8s_client.go
index 43d8ab58..6c8aa26a 100644
--- a/apis/core/v1/zz_generated_k8s_client.go
+++ b/apis/core/v1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -50,6 +52,19 @@ type Client struct {
 	resourceQuotaControllers map[string]ResourceQuotaController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/extensions/v1beta1/zz_generated_deepcopy.go b/apis/extensions/v1beta1/zz_generated_deepcopy.go
index fb47897d..dd26dde6 100644
--- a/apis/extensions/v1beta1/zz_generated_deepcopy.go
+++ b/apis/extensions/v1beta1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1beta1
 
 import (
-	extensions_v1beta1 "k8s.io/api/extensions/v1beta1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *IngressList) DeepCopyInto(out *IngressList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]extensions_v1beta1.Ingress, len(*in))
+		*out = make([]extensionsv1beta1.Ingress, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -45,7 +45,7 @@ func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]extensions_v1beta1.PodSecurityPolicy, len(*in))
+		*out = make([]extensionsv1beta1.PodSecurityPolicy, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/extensions/v1beta1/zz_generated_k8s_client.go b/apis/extensions/v1beta1/zz_generated_k8s_client.go
index c8f93a1b..02121b62 100644
--- a/apis/extensions/v1beta1/zz_generated_k8s_client.go
+++ b/apis/extensions/v1beta1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -28,6 +30,19 @@ type Client struct {
 	ingressControllers map[string]IngressController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/management.cattle.io/v3/zz_generated_deepcopy.go b/apis/management.cattle.io/v3/zz_generated_deepcopy.go
index a5574f40..6a06c42c 100644
--- a/apis/management.cattle.io/v3/zz_generated_deepcopy.go
+++ b/apis/management.cattle.io/v3/zz_generated_deepcopy.go
@@ -2,7 +2,7 @@ package v3
 
 import (
 	v1 "k8s.io/api/core/v1"
-	rbac_v1 "k8s.io/api/rbac/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	version "k8s.io/apimachinery/pkg/version"
 )
@@ -77,12 +77,8 @@ func (in *ActiveDirectoryConfig) DeepCopyInto(out *ActiveDirectoryConfig) {
 	}
 	if in.NestedGroupMembershipEnabled != nil {
 		in, out := &in.NestedGroupMembershipEnabled, &out.NestedGroupMembershipEnabled
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	return
 }
@@ -190,12 +186,8 @@ func (in *AmazonElasticContainerServiceConfig) DeepCopyInto(out *AmazonElasticCo
 	}
 	if in.AssociateWorkerNodePublicIP != nil {
 		in, out := &in.AssociateWorkerNodePublicIP, &out.AssociateWorkerNodePublicIP
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	return
 }
@@ -686,39 +678,23 @@ func (in *CloudProvider) DeepCopyInto(out *CloudProvider) {
 	*out = *in
 	if in.AWSCloudProvider != nil {
 		in, out := &in.AWSCloudProvider, &out.AWSCloudProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AWSCloudProvider)
-			**out = **in
-		}
+		*out = new(AWSCloudProvider)
+		**out = **in
 	}
 	if in.AzureCloudProvider != nil {
 		in, out := &in.AzureCloudProvider, &out.AzureCloudProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AzureCloudProvider)
-			**out = **in
-		}
+		*out = new(AzureCloudProvider)
+		**out = **in
 	}
 	if in.OpenstackCloudProvider != nil {
 		in, out := &in.OpenstackCloudProvider, &out.OpenstackCloudProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(OpenstackCloudProvider)
-			**out = **in
-		}
+		*out = new(OpenstackCloudProvider)
+		**out = **in
 	}
 	if in.VsphereCloudProvider != nil {
 		in, out := &in.VsphereCloudProvider, &out.VsphereCloudProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(VsphereCloudProvider)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(VsphereCloudProvider)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -829,30 +805,18 @@ func (in *ClusterAlertSpec) DeepCopyInto(out *ClusterAlertSpec) {
 	in.AlertCommonSpec.DeepCopyInto(&out.AlertCommonSpec)
 	if in.TargetNode != nil {
 		in, out := &in.TargetNode, &out.TargetNode
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(TargetNode)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(TargetNode)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.TargetSystemService != nil {
 		in, out := &in.TargetSystemService, &out.TargetSystemService
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(TargetSystemService)
-			**out = **in
-		}
+		*out = new(TargetSystemService)
+		**out = **in
 	}
 	if in.TargetEvent != nil {
 		in, out := &in.TargetEvent, &out.TargetEvent
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(TargetEvent)
-			**out = **in
-		}
+		*out = new(TargetEvent)
+		**out = **in
 	}
 	return
 }
@@ -1145,12 +1109,8 @@ func (in *ClusterLoggingStatus) DeepCopyInto(out *ClusterLoggingStatus) {
 	in.AppliedSpec.DeepCopyInto(&out.AppliedSpec)
 	if in.FailedSpec != nil {
 		in, out := &in.FailedSpec, &out.FailedSpec
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ClusterLoggingSpec)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ClusterLoggingSpec)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -1324,57 +1284,33 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
 	*out = *in
 	if in.ImportedConfig != nil {
 		in, out := &in.ImportedConfig, &out.ImportedConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ImportedConfig)
-			**out = **in
-		}
+		*out = new(ImportedConfig)
+		**out = **in
 	}
 	if in.GoogleKubernetesEngineConfig != nil {
 		in, out := &in.GoogleKubernetesEngineConfig, &out.GoogleKubernetesEngineConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(GoogleKubernetesEngineConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(GoogleKubernetesEngineConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.AzureKubernetesServiceConfig != nil {
 		in, out := &in.AzureKubernetesServiceConfig, &out.AzureKubernetesServiceConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AzureKubernetesServiceConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(AzureKubernetesServiceConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.RancherKubernetesEngineConfig != nil {
 		in, out := &in.RancherKubernetesEngineConfig, &out.RancherKubernetesEngineConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(RancherKubernetesEngineConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(RancherKubernetesEngineConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.AmazonElasticContainerServiceConfig != nil {
 		in, out := &in.AmazonElasticContainerServiceConfig, &out.AmazonElasticContainerServiceConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AmazonElasticContainerServiceConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(AmazonElasticContainerServiceConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.EnableNetworkPolicy != nil {
 		in, out := &in.EnableNetworkPolicy, &out.EnableNetworkPolicy
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	return
 }
@@ -1421,12 +1357,8 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
 	in.AppliedSpec.DeepCopyInto(&out.AppliedSpec)
 	if in.FailedSpec != nil {
 		in, out := &in.FailedSpec, &out.FailedSpec
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ClusterSpec)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ClusterSpec)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.Requested != nil {
 		in, out := &in.Requested, &out.Requested
@@ -1444,12 +1376,8 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
 	}
 	if in.Version != nil {
 		in, out := &in.Version, &out.Version
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(version.Info)
-			**out = **in
-		}
+		*out = new(version.Info)
+		**out = **in
 	}
 	in.Capabilities.DeepCopyInto(&out.Capabilities)
 	return
@@ -2113,7 +2041,7 @@ func (in *GlobalRole) DeepCopyInto(out *GlobalRole) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	if in.Rules != nil {
 		in, out := &in.Rules, &out.Rules
-		*out = make([]rbac_v1.PolicyRule, len(*in))
+		*out = make([]rbacv1.PolicyRule, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -2259,30 +2187,18 @@ func (in *GoogleKubernetesEngineConfig) DeepCopyInto(out *GoogleKubernetesEngine
 	}
 	if in.EnableHTTPLoadBalancing != nil {
 		in, out := &in.EnableHTTPLoadBalancing, &out.EnableHTTPLoadBalancing
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	if in.EnableHorizontalPodAutoscaling != nil {
 		in, out := &in.EnableHorizontalPodAutoscaling, &out.EnableHorizontalPodAutoscaling
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	if in.EnableNetworkPolicyConfig != nil {
 		in, out := &in.EnableNetworkPolicyConfig, &out.EnableNetworkPolicyConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	if in.Locations != nil {
 		in, out := &in.Locations, &out.Locations
@@ -2291,21 +2207,13 @@ func (in *GoogleKubernetesEngineConfig) DeepCopyInto(out *GoogleKubernetesEngine
 	}
 	if in.EnableStackdriverLogging != nil {
 		in, out := &in.EnableStackdriverLogging, &out.EnableStackdriverLogging
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	if in.EnableStackdriverMonitoring != nil {
 		in, out := &in.EnableStackdriverMonitoring, &out.EnableStackdriverMonitoring
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	return
 }
@@ -2995,48 +2903,28 @@ func (in *LoggingCommonSpec) DeepCopyInto(out *LoggingCommonSpec) {
 	}
 	if in.ElasticsearchConfig != nil {
 		in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ElasticsearchConfig)
-			**out = **in
-		}
+		*out = new(ElasticsearchConfig)
+		**out = **in
 	}
 	if in.SplunkConfig != nil {
 		in, out := &in.SplunkConfig, &out.SplunkConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SplunkConfig)
-			**out = **in
-		}
+		*out = new(SplunkConfig)
+		**out = **in
 	}
 	if in.KafkaConfig != nil {
 		in, out := &in.KafkaConfig, &out.KafkaConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(KafkaConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(KafkaConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.SyslogConfig != nil {
 		in, out := &in.SyslogConfig, &out.SyslogConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SyslogConfig)
-			**out = **in
-		}
+		*out = new(SyslogConfig)
+		**out = **in
 	}
 	if in.FluentForwarderConfig != nil {
 		in, out := &in.FluentForwarderConfig, &out.FluentForwarderConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(FluentForwarderConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(FluentForwarderConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -3151,30 +3039,18 @@ func (in *NetworkConfig) DeepCopyInto(out *NetworkConfig) {
 	}
 	if in.CalicoNetworkProvider != nil {
 		in, out := &in.CalicoNetworkProvider, &out.CalicoNetworkProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(CalicoNetworkProvider)
-			**out = **in
-		}
+		*out = new(CalicoNetworkProvider)
+		**out = **in
 	}
 	if in.CanalNetworkProvider != nil {
 		in, out := &in.CanalNetworkProvider, &out.CanalNetworkProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(CanalNetworkProvider)
-			**out = **in
-		}
+		*out = new(CanalNetworkProvider)
+		**out = **in
 	}
 	if in.FlannelNetworkProvider != nil {
 		in, out := &in.FlannelNetworkProvider, &out.FlannelNetworkProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(FlannelNetworkProvider)
-			**out = **in
-		}
+		*out = new(FlannelNetworkProvider)
+		**out = **in
 	}
 	return
 }
@@ -3567,12 +3443,8 @@ func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
 	*out = *in
 	if in.CustomConfig != nil {
 		in, out := &in.CustomConfig, &out.CustomConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(CustomConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(CustomConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	in.InternalNodeSpec.DeepCopyInto(&out.InternalNodeSpec)
 	if in.DesiredNodeLabels != nil {
@@ -3591,12 +3463,8 @@ func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
 	}
 	if in.NodeDrainInput != nil {
 		in, out := &in.NodeDrainInput, &out.NodeDrainInput
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(NodeDrainInput)
-			**out = **in
-		}
+		*out = new(NodeDrainInput)
+		**out = **in
 	}
 	return
 }
@@ -3636,21 +3504,13 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
 	}
 	if in.NodeTemplateSpec != nil {
 		in, out := &in.NodeTemplateSpec, &out.NodeTemplateSpec
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(NodeTemplateSpec)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(NodeTemplateSpec)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.NodeConfig != nil {
 		in, out := &in.NodeConfig, &out.NodeConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(RKEConfigNode)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(RKEConfigNode)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.NodeAnnotations != nil {
 		in, out := &in.NodeAnnotations, &out.NodeAnnotations
@@ -3675,12 +3535,8 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
 	}
 	if in.DockerInfo != nil {
 		in, out := &in.DockerInfo, &out.DockerInfo
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(DockerInfo)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(DockerInfo)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -3816,39 +3672,23 @@ func (in *Notification) DeepCopyInto(out *Notification) {
 	*out = *in
 	if in.SMTPConfig != nil {
 		in, out := &in.SMTPConfig, &out.SMTPConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SMTPConfig)
-			**out = **in
-		}
+		*out = new(SMTPConfig)
+		**out = **in
 	}
 	if in.SlackConfig != nil {
 		in, out := &in.SlackConfig, &out.SlackConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SlackConfig)
-			**out = **in
-		}
+		*out = new(SlackConfig)
+		**out = **in
 	}
 	if in.PagerdutyConfig != nil {
 		in, out := &in.PagerdutyConfig, &out.PagerdutyConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PagerdutyConfig)
-			**out = **in
-		}
+		*out = new(PagerdutyConfig)
+		**out = **in
 	}
 	if in.WebhookConfig != nil {
 		in, out := &in.WebhookConfig, &out.WebhookConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(WebhookConfig)
-			**out = **in
-		}
+		*out = new(WebhookConfig)
+		**out = **in
 	}
 	return
 }
@@ -3930,39 +3770,23 @@ func (in *NotifierSpec) DeepCopyInto(out *NotifierSpec) {
 	*out = *in
 	if in.SMTPConfig != nil {
 		in, out := &in.SMTPConfig, &out.SMTPConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SMTPConfig)
-			**out = **in
-		}
+		*out = new(SMTPConfig)
+		**out = **in
 	}
 	if in.SlackConfig != nil {
 		in, out := &in.SlackConfig, &out.SlackConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SlackConfig)
-			**out = **in
-		}
+		*out = new(SlackConfig)
+		**out = **in
 	}
 	if in.PagerdutyConfig != nil {
 		in, out := &in.PagerdutyConfig, &out.PagerdutyConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PagerdutyConfig)
-			**out = **in
-		}
+		*out = new(PagerdutyConfig)
+		**out = **in
 	}
 	if in.WebhookConfig != nil {
 		in, out := &in.WebhookConfig, &out.WebhookConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(WebhookConfig)
-			**out = **in
-		}
+		*out = new(WebhookConfig)
+		**out = **in
 	}
 	return
 }
@@ -4557,21 +4381,13 @@ func (in *ProjectAlertSpec) DeepCopyInto(out *ProjectAlertSpec) {
 	in.AlertCommonSpec.DeepCopyInto(&out.AlertCommonSpec)
 	if in.TargetWorkload != nil {
 		in, out := &in.TargetWorkload, &out.TargetWorkload
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(TargetWorkload)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(TargetWorkload)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.TargetPod != nil {
 		in, out := &in.TargetPod, &out.TargetPod
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(TargetPod)
-			**out = **in
-		}
+		*out = new(TargetPod)
+		**out = **in
 	}
 	return
 }
@@ -4804,12 +4620,8 @@ func (in *ProjectNetworkPolicy) DeepCopyInto(out *ProjectNetworkPolicy) {
 	out.Spec = in.Spec
 	if in.Status != nil {
 		in, out := &in.Status, &out.Status
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ProjectNetworkPolicyStatus)
-			**out = **in
-		}
+		*out = new(ProjectNetworkPolicyStatus)
+		**out = **in
 	}
 	return
 }
@@ -4980,21 +4792,13 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
 	*out = *in
 	if in.ResourceQuota != nil {
 		in, out := &in.ResourceQuota, &out.ResourceQuota
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ProjectResourceQuota)
-			**out = **in
-		}
+		*out = new(ProjectResourceQuota)
+		**out = **in
 	}
 	if in.NamespaceDefaultResourceQuota != nil {
 		in, out := &in.NamespaceDefaultResourceQuota, &out.NamespaceDefaultResourceQuota
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(NamespaceResourceQuota)
-			**out = **in
-		}
+		*out = new(NamespaceResourceQuota)
+		**out = **in
 	}
 	return
 }
@@ -5296,7 +5100,7 @@ func (in *RoleTemplate) DeepCopyInto(out *RoleTemplate) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	if in.Rules != nil {
 		in, out := &in.Rules, &out.Rules
-		*out = make([]rbac_v1.PolicyRule, len(*in))
+		*out = make([]rbacv1.PolicyRule, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -6102,12 +5906,8 @@ func (in *User) DeepCopyInto(out *User) {
 	}
 	if in.Enabled != nil {
 		in, out := &in.Enabled, &out.Enabled
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
+		*out = new(bool)
+		**out = **in
 	}
 	out.Spec = in.Spec
 	in.Status.DeepCopyInto(&out.Status)
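Reviewer note: the bulk of the deepcopy churn above (and in the project.cattle.io file below) is one mechanical rewrite repeated for every pointer field. The outer `!= nil` guard already rules out a nil pointer, so the inner re-check that the older generator emitted was dead code. Schematically, using the User.Enabled field from this diff:

	// Before: the *out = nil arm can never be taken, because the
	// outer guard already established in.Enabled != nil.
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		if *in == nil {
			*out = nil // unreachable
		} else {
			*out = new(bool)
			**out = **in
		}
	}

	// After: the regenerated output copies directly.
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
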
diff --git a/apis/management.cattle.io/v3/zz_generated_k8s_client.go b/apis/management.cattle.io/v3/zz_generated_k8s_client.go
index 18931cfe..c3f2737a 100644
--- a/apis/management.cattle.io/v3/zz_generated_k8s_client.go
+++ b/apis/management.cattle.io/v3/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -104,6 +106,19 @@ type Client struct {
 	clusterCatalogControllers map[string]ClusterCatalogController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/management.cattle.io/v3public/zz_generated_k8s_client.go b/apis/management.cattle.io/v3public/zz_generated_k8s_client.go
index 870f38e6..dabf1411 100644
--- a/apis/management.cattle.io/v3public/zz_generated_k8s_client.go
+++ b/apis/management.cattle.io/v3public/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -26,6 +28,19 @@ type Client struct {
 	authProviderControllers map[string]AuthProviderController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/networking.k8s.io/v1/zz_generated_deepcopy.go b/apis/networking.k8s.io/v1/zz_generated_deepcopy.go
index 3d67128e..79199c22 100644
--- a/apis/networking.k8s.io/v1/zz_generated_deepcopy.go
+++ b/apis/networking.k8s.io/v1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1
 
 import (
-	networking_v1 "k8s.io/api/networking/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]networking_v1.NetworkPolicy, len(*in))
+		*out = make([]networkingv1.NetworkPolicy, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/networking.k8s.io/v1/zz_generated_k8s_client.go b/apis/networking.k8s.io/v1/zz_generated_k8s_client.go
index 403ec4c4..de98ec6d 100644
--- a/apis/networking.k8s.io/v1/zz_generated_k8s_client.go
+++ b/apis/networking.k8s.io/v1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -26,6 +28,19 @@ type Client struct {
 	networkPolicyControllers map[string]NetworkPolicyController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/project.cattle.io/v3/zz_generated_deepcopy.go b/apis/project.cattle.io/v3/zz_generated_deepcopy.go
index 9c491cfc..7059f6e7 100644
--- a/apis/project.cattle.io/v3/zz_generated_deepcopy.go
+++ b/apis/project.cattle.io/v3/zz_generated_deepcopy.go
@@ -468,21 +468,13 @@ func (in *Constraints) DeepCopyInto(out *Constraints) {
 	*out = *in
 	if in.Branch != nil {
 		in, out := &in.Branch, &out.Branch
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Constraint)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Constraint)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.Event != nil {
 		in, out := &in.Event, &out.Event
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Constraint)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Constraint)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -1162,12 +1154,8 @@ func (in *PipelineConfig) DeepCopyInto(out *PipelineConfig) {
 	}
 	if in.Branch != nil {
 		in, out := &in.Branch, &out.Branch
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Constraint)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Constraint)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -1403,12 +1391,8 @@ func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
 	*out = *in
 	if in.SourceCodeCredential != nil {
 		in, out := &in.SourceCodeCredential, &out.SourceCodeCredential
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SourceCodeCredential)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(SourceCodeCredential)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -2035,12 +2019,8 @@ func (in *Stage) DeepCopyInto(out *Stage) {
 	}
 	if in.When != nil {
 		in, out := &in.When, &out.When
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Constraints)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Constraints)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -2081,39 +2061,23 @@ func (in *Step) DeepCopyInto(out *Step) {
 	*out = *in
 	if in.SourceCodeConfig != nil {
 		in, out := &in.SourceCodeConfig, &out.SourceCodeConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SourceCodeConfig)
-			**out = **in
-		}
+		*out = new(SourceCodeConfig)
+		**out = **in
 	}
 	if in.RunScriptConfig != nil {
 		in, out := &in.RunScriptConfig, &out.RunScriptConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(RunScriptConfig)
-			**out = **in
-		}
+		*out = new(RunScriptConfig)
+		**out = **in
 	}
 	if in.PublishImageConfig != nil {
 		in, out := &in.PublishImageConfig, &out.PublishImageConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PublishImageConfig)
-			**out = **in
-		}
+		*out = new(PublishImageConfig)
+		**out = **in
 	}
 	if in.ApplyYamlConfig != nil {
 		in, out := &in.ApplyYamlConfig, &out.ApplyYamlConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ApplyYamlConfig)
-			**out = **in
-		}
+		*out = new(ApplyYamlConfig)
+		**out = **in
 	}
 	if in.Env != nil {
 		in, out := &in.Env, &out.Env
@@ -2129,12 +2093,8 @@ func (in *Step) DeepCopyInto(out *Step) {
 	}
 	if in.When != nil {
 		in, out := &in.When, &out.When
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Constraints)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Constraints)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
diff --git a/apis/project.cattle.io/v3/zz_generated_k8s_client.go b/apis/project.cattle.io/v3/zz_generated_k8s_client.go
index 62f857aa..53b53662 100644
--- a/apis/project.cattle.io/v3/zz_generated_k8s_client.go
+++ b/apis/project.cattle.io/v3/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -64,6 +66,19 @@ type Client struct {
 	sourceCodeRepositoryControllers map[string]SourceCodeRepositoryController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/apis/rbac.authorization.k8s.io/v1/zz_generated_deepcopy.go b/apis/rbac.authorization.k8s.io/v1/zz_generated_deepcopy.go
index a08fab35..4decfb4a 100644
--- a/apis/rbac.authorization.k8s.io/v1/zz_generated_deepcopy.go
+++ b/apis/rbac.authorization.k8s.io/v1/zz_generated_deepcopy.go
@@ -1,7 +1,7 @@
 package v1
 
 import (
-	rbac_v1 "k8s.io/api/rbac/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -12,7 +12,7 @@ func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]rbac_v1.ClusterRoleBinding, len(*in))
+		*out = make([]rbacv1.ClusterRoleBinding, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -45,7 +45,7 @@ func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]rbac_v1.ClusterRole, len(*in))
+		*out = make([]rbacv1.ClusterRole, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -78,7 +78,7 @@ func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]rbac_v1.RoleBinding, len(*in))
+		*out = make([]rbacv1.RoleBinding, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -111,7 +111,7 @@ func (in *RoleList) DeepCopyInto(out *RoleList) {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]rbac_v1.Role, len(*in))
+		*out = make([]rbacv1.Role, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/apis/rbac.authorization.k8s.io/v1/zz_generated_k8s_client.go b/apis/rbac.authorization.k8s.io/v1/zz_generated_k8s_client.go
index ce7e25f7..ff5e7464 100644
--- a/apis/rbac.authorization.k8s.io/v1/zz_generated_k8s_client.go
+++ b/apis/rbac.authorization.k8s.io/v1/zz_generated_k8s_client.go
@@ -11,6 +11,8 @@ import (
 	"k8s.io/client-go/rest"
 )
 
+type contextKeyType struct{}
+
 type Interface interface {
 	RESTClient() rest.Interface
 	controller.Starter
@@ -32,6 +34,19 @@ type Client struct {
 	roleControllers map[string]RoleController
 }
 
+func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) {
+	c, err := NewForConfig(config)
+	if err != nil {
+		return ctx, nil, err
+	}
+
+	return context.WithValue(ctx, contextKeyType{}, c), c, nil
+}
+
+func From(ctx context.Context) Interface {
+	return ctx.Value(contextKeyType{}).(Interface)
+}
+
 func NewForConfig(config rest.Config) (Interface, error) {
 	if config.NegotiatedSerializer == nil {
 		config.NegotiatedSerializer = dynamic.NegotiatedSerializer
diff --git a/client/cluster/v3/zz_generated_cinder_persistent_volume_source.go b/client/cluster/v3/zz_generated_cinder_persistent_volume_source.go
new file mode 100644
index 00000000..e36020dc
--- /dev/null
+++ b/client/cluster/v3/zz_generated_cinder_persistent_volume_source.go
@@ -0,0 +1,16 @@
+package client
+
+const (
+	CinderPersistentVolumeSourceType = "cinderPersistentVolumeSource"
+	CinderPersistentVolumeSourceFieldFSType = "fsType"
+	CinderPersistentVolumeSourceFieldReadOnly = "readOnly"
+	CinderPersistentVolumeSourceFieldSecretRef = "secretRef"
+	CinderPersistentVolumeSourceFieldVolumeID = "volumeID"
+)
+
+type CinderPersistentVolumeSource struct {
+	FSType string `json:"fsType,omitempty" yaml:"fsType,omitempty"`
+	ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
+	SecretRef *SecretReference `json:"secretRef,omitempty" yaml:"secretRef,omitempty"`
+	VolumeID string `json:"volumeID,omitempty" yaml:"volumeID,omitempty"`
+}
diff --git a/client/cluster/v3/zz_generated_cinder_volume_source.go b/client/cluster/v3/zz_generated_cinder_volume_source.go
deleted file mode 100644
index e880ab57..00000000
--- a/client/cluster/v3/zz_generated_cinder_volume_source.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client
-
-const (
-	CinderVolumeSourceType = "cinderVolumeSource"
-	CinderVolumeSourceFieldFSType = "fsType"
-	CinderVolumeSourceFieldReadOnly = "readOnly"
-	CinderVolumeSourceFieldVolumeID = "volumeID"
-)
-
-type CinderVolumeSource struct {
-	FSType string `json:"fsType,omitempty" yaml:"fsType,omitempty"`
-	ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
-	VolumeID string `json:"volumeID,omitempty" yaml:"volumeID,omitempty"`
-}
diff --git a/client/cluster/v3/zz_generated_local_volume_source.go b/client/cluster/v3/zz_generated_local_volume_source.go
index 83eaec3b..6dea350b 100644
--- a/client/cluster/v3/zz_generated_local_volume_source.go
+++ b/client/cluster/v3/zz_generated_local_volume_source.go
@@ -1,10 +1,12 @@
 package client
 
 const (
-	LocalVolumeSourceType = "localVolumeSource"
-	LocalVolumeSourceFieldPath = "path"
+	LocalVolumeSourceType = "localVolumeSource"
+	LocalVolumeSourceFieldFSType = "fsType"
+	LocalVolumeSourceFieldPath = "path"
 )
 
 type LocalVolumeSource struct {
-	Path string `json:"path,omitempty" yaml:"path,omitempty"`
+	FSType string `json:"fsType,omitempty" yaml:"fsType,omitempty"`
+	Path string `json:"path,omitempty" yaml:"path,omitempty"`
 }
diff --git a/client/cluster/v3/zz_generated_node_selector_term.go b/client/cluster/v3/zz_generated_node_selector_term.go
index 648cf8e3..11bf2562 100644
--- a/client/cluster/v3/zz_generated_node_selector_term.go
+++ b/client/cluster/v3/zz_generated_node_selector_term.go
@@ -3,8 +3,10 @@ package client
 const (
 	NodeSelectorTermType = "nodeSelectorTerm"
 	NodeSelectorTermFieldMatchExpressions = "matchExpressions"
+	NodeSelectorTermFieldMatchFields = "matchFields"
 )
 
 type NodeSelectorTerm struct {
 	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" yaml:"matchExpressions,omitempty"`
+	MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" yaml:"matchFields,omitempty"`
 }
diff --git a/client/cluster/v3/zz_generated_persistent_volume.go b/client/cluster/v3/zz_generated_persistent_volume.go
index 12e8759f..613bc3ab 100644
--- a/client/cluster/v3/zz_generated_persistent_volume.go
+++ b/client/cluster/v3/zz_generated_persistent_volume.go
@@ -61,7 +61,7 @@ type PersistentVolume struct {
 	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" yaml:"csi,omitempty"`
 	Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"`
 	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" yaml:"cephfs,omitempty"`
-	Cinder *CinderVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"`
+	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"`
 	ClaimRef *ObjectReference `json:"claimRef,omitempty" yaml:"claimRef,omitempty"`
 	Created string `json:"created,omitempty" yaml:"created,omitempty"`
 	CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"`
diff --git a/client/cluster/v3/zz_generated_persistent_volume_spec.go b/client/cluster/v3/zz_generated_persistent_volume_spec.go
index 54643bf6..a1e6529a 100644
--- a/client/cluster/v3/zz_generated_persistent_volume_spec.go
+++ b/client/cluster/v3/zz_generated_persistent_volume_spec.go
@@ -42,7 +42,7 @@ type PersistentVolumeSpec struct {
 	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" yaml:"csi,omitempty"`
 	Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"`
 	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" yaml:"cephfs,omitempty"`
-	Cinder *CinderVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"`
+	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"`
 	ClaimRef *ObjectReference `json:"claimRef,omitempty" yaml:"claimRef,omitempty"`
 	FC *FCVolumeSource `json:"fc,omitempty" yaml:"fc,omitempty"`
 	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" yaml:"flexVolume,omitempty"`
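Reviewer note: CinderVolumeSource is split in two here, mirroring upstream Kubernetes: the persistent-volume variant gains a SecretRef, and the PersistentVolume/PersistentVolumeSpec Cinder fields above now take the new type. A hedged construction sketch; the field values are illustrative, and the SecretReference field names (Name/Namespace) are an assumption based on the upstream type, since they are not shown in this diff:

	pv := &client.PersistentVolume{
		Name: "example-pv", // hypothetical
		Cinder: &client.CinderPersistentVolumeSource{
			VolumeID: "d2f5de24-0000", // hypothetical OpenStack volume ID
			FSType:   "ext4",
			ReadOnly: false,
			// New in this diff: the PV-level variant can reference a
			// namespaced secret (e.g. Cinder credentials).
			SecretRef: &client.SecretReference{
				Name:      "cinder-secret", // hypothetical
				Namespace: "kube-system",
			},
		},
	}
	_ = pv
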
yaml:"labels,omitempty"` - MountOptions []string `json:"mountOptions,omitempty" yaml:"mountOptions,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` - Parameters map[string]string `json:"parameters,omitempty" yaml:"parameters,omitempty"` - Provisioner string `json:"provisioner,omitempty" yaml:"provisioner,omitempty"` - ReclaimPolicy string `json:"reclaimPolicy,omitempty" yaml:"reclaimPolicy,omitempty"` - Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` - UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"` - VolumeBindingMode string `json:"volumeBindingMode,omitempty" yaml:"volumeBindingMode,omitempty"` + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" yaml:"allowVolumeExpansion,omitempty"` + AllowedTopologies []TopologySelectorTerm `json:"allowedTopologies,omitempty" yaml:"allowedTopologies,omitempty"` + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + Created string `json:"created,omitempty" yaml:"created,omitempty"` + CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + MountOptions []string `json:"mountOptions,omitempty" yaml:"mountOptions,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` + Parameters map[string]string `json:"parameters,omitempty" yaml:"parameters,omitempty"` + Provisioner string `json:"provisioner,omitempty" yaml:"provisioner,omitempty"` + ReclaimPolicy string `json:"reclaimPolicy,omitempty" yaml:"reclaimPolicy,omitempty"` + Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` + UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"` + VolumeBindingMode string `json:"volumeBindingMode,omitempty" yaml:"volumeBindingMode,omitempty"` } type StorageClassCollection struct { diff --git a/client/cluster/v3/zz_generated_topology_selector_label_requirement.go b/client/cluster/v3/zz_generated_topology_selector_label_requirement.go new file mode 100644 index 00000000..2d63b43a --- /dev/null +++ b/client/cluster/v3/zz_generated_topology_selector_label_requirement.go @@ -0,0 +1,12 @@ +package client + +const ( + TopologySelectorLabelRequirementType = "topologySelectorLabelRequirement" + TopologySelectorLabelRequirementFieldKey = "key" + TopologySelectorLabelRequirementFieldValues = "values" +) + +type TopologySelectorLabelRequirement struct { + Key string `json:"key,omitempty" yaml:"key,omitempty"` + Values []string `json:"values,omitempty" yaml:"values,omitempty"` +} diff --git a/client/cluster/v3/zz_generated_topology_selector_term.go b/client/cluster/v3/zz_generated_topology_selector_term.go new file mode 100644 index 00000000..6a78c4b1 --- /dev/null +++ b/client/cluster/v3/zz_generated_topology_selector_term.go @@ -0,0 +1,10 @@ +package client + +const ( + TopologySelectorTermType = "topologySelectorTerm" + TopologySelectorTermFieldMatchLabelExpressions = "matchLabelExpressions" +) + +type TopologySelectorTerm struct { + MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" yaml:"matchLabelExpressions,omitempty"` +} diff --git a/client/management/v3/zz_generated_allowed_host_path.go 
diff --git a/client/management/v3/zz_generated_allowed_host_path.go b/client/management/v3/zz_generated_allowed_host_path.go
index 228374dc..3249f332 100644
--- a/client/management/v3/zz_generated_allowed_host_path.go
+++ b/client/management/v3/zz_generated_allowed_host_path.go
@@ -3,8 +3,10 @@ package client
 const (
 	AllowedHostPathType = "allowedHostPath"
 	AllowedHostPathFieldPathPrefix = "pathPrefix"
+	AllowedHostPathFieldReadOnly = "readOnly"
 )
 
 type AllowedHostPath struct {
 	PathPrefix string `json:"pathPrefix,omitempty" yaml:"pathPrefix,omitempty"`
+	ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
 }
diff --git a/client/management/v3/zz_generated_config_map_node_config_source.go b/client/management/v3/zz_generated_config_map_node_config_source.go
new file mode 100644
index 00000000..1f6d0900
--- /dev/null
+++ b/client/management/v3/zz_generated_config_map_node_config_source.go
@@ -0,0 +1,18 @@
+package client
+
+const (
+	ConfigMapNodeConfigSourceType = "configMapNodeConfigSource"
+	ConfigMapNodeConfigSourceFieldKubeletConfigKey = "kubeletConfigKey"
+	ConfigMapNodeConfigSourceFieldName = "name"
+	ConfigMapNodeConfigSourceFieldNamespace = "namespace"
+	ConfigMapNodeConfigSourceFieldResourceVersion = "resourceVersion"
+	ConfigMapNodeConfigSourceFieldUID = "uid"
+)
+
+type ConfigMapNodeConfigSource struct {
+	KubeletConfigKey string `json:"kubeletConfigKey,omitempty" yaml:"kubeletConfigKey,omitempty"`
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+	ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
+	UID string `json:"uid,omitempty" yaml:"uid,omitempty"`
+}
diff --git a/client/management/v3/zz_generated_internal_node_status.go b/client/management/v3/zz_generated_internal_node_status.go
index 2302154d..963eefb2 100644
--- a/client/management/v3/zz_generated_internal_node_status.go
+++ b/client/management/v3/zz_generated_internal_node_status.go
@@ -4,6 +4,7 @@ const (
 	InternalNodeStatusType = "internalNodeStatus"
 	InternalNodeStatusFieldAllocatable = "allocatable"
 	InternalNodeStatusFieldCapacity = "capacity"
+	InternalNodeStatusFieldConfig = "config"
 	InternalNodeStatusFieldExternalIPAddress = "externalIpAddress"
 	InternalNodeStatusFieldHostname = "hostname"
 	InternalNodeStatusFieldIPAddress = "ipAddress"
@@ -16,6 +17,7 @@ const (
 type InternalNodeStatus struct {
 	Allocatable map[string]string `json:"allocatable,omitempty" yaml:"allocatable,omitempty"`
 	Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"`
+	Config *NodeConfigStatus `json:"config,omitempty" yaml:"config,omitempty"`
 	ExternalIPAddress string `json:"externalIpAddress,omitempty" yaml:"externalIpAddress,omitempty"`
 	Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"`
 	IPAddress string `json:"ipAddress,omitempty" yaml:"ipAddress,omitempty"`
diff --git a/client/management/v3/zz_generated_node.go b/client/management/v3/zz_generated_node.go
index 6511bd04..e82bc18c 100644
--- a/client/management/v3/zz_generated_node.go
+++ b/client/management/v3/zz_generated_node.go
@@ -11,6 +11,7 @@ const (
 	NodeFieldCapacity = "capacity"
 	NodeFieldClusterID = "clusterId"
 	NodeFieldConditions = "conditions"
+	NodeFieldConfig = "config"
 	NodeFieldControlPlane = "controlPlane"
 	NodeFieldCreated = "created"
 	NodeFieldCreatorID = "creatorId"
@@ -57,6 +58,7 @@ type Node struct {
 	Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"`
 	ClusterID string `json:"clusterId,omitempty" yaml:"clusterId,omitempty"`
 	Conditions []NodeCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
+	Config *NodeConfigStatus `json:"config,omitempty" yaml:"config,omitempty"`
 	ControlPlane bool `json:"controlPlane,omitempty" yaml:"controlPlane,omitempty"`
 	Created string `json:"created,omitempty" yaml:"created,omitempty"`
 	CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"`
diff --git a/client/management/v3/zz_generated_node_config_source.go b/client/management/v3/zz_generated_node_config_source.go
index 6d064f7d..fc84f349 100644
--- a/client/management/v3/zz_generated_node_config_source.go
+++ b/client/management/v3/zz_generated_node_config_source.go
@@ -1,14 +1,10 @@
 package client
 
 const (
-	NodeConfigSourceType = "nodeConfigSource"
-	NodeConfigSourceFieldAPIVersion = "apiVersion"
-	NodeConfigSourceFieldConfigMapRef = "configMapRef"
-	NodeConfigSourceFieldKind = "kind"
+	NodeConfigSourceType = "nodeConfigSource"
+	NodeConfigSourceFieldConfigMap = "configMap"
 )
 
 type NodeConfigSource struct {
-	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
-	ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" yaml:"configMapRef,omitempty"`
-	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
+	ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" yaml:"configMap,omitempty"`
 }
diff --git a/client/management/v3/zz_generated_node_config_status.go b/client/management/v3/zz_generated_node_config_status.go
new file mode 100644
index 00000000..919108f3
--- /dev/null
+++ b/client/management/v3/zz_generated_node_config_status.go
@@ -0,0 +1,16 @@
+package client
+
+const (
+	NodeConfigStatusType = "nodeConfigStatus"
+	NodeConfigStatusFieldActive = "active"
+	NodeConfigStatusFieldAssigned = "assigned"
+	NodeConfigStatusFieldError = "error"
+	NodeConfigStatusFieldLastKnownGood = "lastKnownGood"
+)
+
+type NodeConfigStatus struct {
+	Active *NodeConfigSource `json:"active,omitempty" yaml:"active,omitempty"`
+	Assigned *NodeConfigSource `json:"assigned,omitempty" yaml:"assigned,omitempty"`
+	Error string `json:"error,omitempty" yaml:"error,omitempty"`
+	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" yaml:"lastKnownGood,omitempty"`
+}
diff --git a/client/management/v3/zz_generated_node_status.go b/client/management/v3/zz_generated_node_status.go
index 3fbc7da0..7af2df5b 100644
--- a/client/management/v3/zz_generated_node_status.go
+++ b/client/management/v3/zz_generated_node_status.go
@@ -5,6 +5,7 @@ const (
 	NodeStatusFieldAllocatable = "allocatable"
 	NodeStatusFieldCapacity = "capacity"
 	NodeStatusFieldConditions = "conditions"
+	NodeStatusFieldConfig = "config"
 	NodeStatusFieldDockerInfo = "dockerInfo"
 	NodeStatusFieldExternalIPAddress = "externalIpAddress"
 	NodeStatusFieldHostname = "hostname"
@@ -25,6 +26,7 @@ type NodeStatus struct {
 	Allocatable map[string]string `json:"allocatable,omitempty" yaml:"allocatable,omitempty"`
 	Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"`
 	Conditions []NodeCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
+	Config *NodeConfigStatus `json:"config,omitempty" yaml:"config,omitempty"`
 	DockerInfo *DockerInfo `json:"dockerInfo,omitempty" yaml:"dockerInfo,omitempty"`
 	ExternalIPAddress string `json:"externalIpAddress,omitempty" yaml:"externalIpAddress,omitempty"`
 	Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"`
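Reviewer note: NodeConfigSource loses its inline apiVersion/kind/configMapRef fields in favor of a single ConfigMap sub-object, and the node status types gain a Config *NodeConfigStatus, matching the reshaped dynamic kubelet config API upstream. How the reshaped source nests; names and values are hypothetical:

	src := &client.NodeConfigSource{
		ConfigMap: &client.ConfigMapNodeConfigSource{
			Namespace:        "kube-system", // hypothetical
			Name:             "node-config", // hypothetical
			KubeletConfigKey: "kubelet",
		},
	}
	cfg := &client.NodeConfigStatus{
		Assigned:      src, // source most recently assigned to the node
		Active:        src, // source the node is currently using
		LastKnownGood: src, // fallback if the active source proves bad
	}
	_ = cfg
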
diff --git a/client/management/v3/zz_generated_pod_security_policy_spec.go b/client/management/v3/zz_generated_pod_security_policy_spec.go
index 22d0a80b..84e91da9 100644
--- a/client/management/v3/zz_generated_pod_security_policy_spec.go
+++ b/client/management/v3/zz_generated_pod_security_policy_spec.go
@@ -6,9 +6,12 @@ const (
 	PodSecurityPolicySpecFieldAllowedCapabilities = "allowedCapabilities"
 	PodSecurityPolicySpecFieldAllowedFlexVolumes = "allowedFlexVolumes"
 	PodSecurityPolicySpecFieldAllowedHostPaths = "allowedHostPaths"
+	PodSecurityPolicySpecFieldAllowedProcMountTypes = "allowedProcMountTypes"
+	PodSecurityPolicySpecFieldAllowedUnsafeSysctls = "allowedUnsafeSysctls"
 	PodSecurityPolicySpecFieldDefaultAddCapabilities = "defaultAddCapabilities"
 	PodSecurityPolicySpecFieldDefaultAllowPrivilegeEscalation = "defaultAllowPrivilegeEscalation"
 	PodSecurityPolicySpecFieldFSGroup = "fsGroup"
+	PodSecurityPolicySpecFieldForbiddenSysctls = "forbiddenSysctls"
 	PodSecurityPolicySpecFieldHostIPC = "hostIPC"
 	PodSecurityPolicySpecFieldHostNetwork = "hostNetwork"
 	PodSecurityPolicySpecFieldHostPID = "hostPID"
@@ -27,9 +30,12 @@ type PodSecurityPolicySpec struct {
 	AllowedCapabilities []string `json:"allowedCapabilities,omitempty" yaml:"allowedCapabilities,omitempty"`
 	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" yaml:"allowedFlexVolumes,omitempty"`
 	AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" yaml:"allowedHostPaths,omitempty"`
+	AllowedProcMountTypes []string `json:"allowedProcMountTypes,omitempty" yaml:"allowedProcMountTypes,omitempty"`
+	AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" yaml:"allowedUnsafeSysctls,omitempty"`
 	DefaultAddCapabilities []string `json:"defaultAddCapabilities,omitempty" yaml:"defaultAddCapabilities,omitempty"`
 	DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" yaml:"defaultAllowPrivilegeEscalation,omitempty"`
 	FSGroup *FSGroupStrategyOptions `json:"fsGroup,omitempty" yaml:"fsGroup,omitempty"`
+	ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" yaml:"forbiddenSysctls,omitempty"`
 	HostIPC bool `json:"hostIPC,omitempty" yaml:"hostIPC,omitempty"`
 	HostNetwork bool `json:"hostNetwork,omitempty" yaml:"hostNetwork,omitempty"`
 	HostPID bool `json:"hostPID,omitempty" yaml:"hostPID,omitempty"`
diff --git a/client/management/v3/zz_generated_pod_security_policy_template.go b/client/management/v3/zz_generated_pod_security_policy_template.go
index a2a77b66..5904f9f3 100644
--- a/client/management/v3/zz_generated_pod_security_policy_template.go
+++ b/client/management/v3/zz_generated_pod_security_policy_template.go
@@ -10,6 +10,8 @@ const (
 	PodSecurityPolicyTemplateFieldAllowedCapabilities = "allowedCapabilities"
 	PodSecurityPolicyTemplateFieldAllowedFlexVolumes = "allowedFlexVolumes"
 	PodSecurityPolicyTemplateFieldAllowedHostPaths = "allowedHostPaths"
+	PodSecurityPolicyTemplateFieldAllowedProcMountTypes = "allowedProcMountTypes"
+	PodSecurityPolicyTemplateFieldAllowedUnsafeSysctls = "allowedUnsafeSysctls"
 	PodSecurityPolicyTemplateFieldAnnotations = "annotations"
 	PodSecurityPolicyTemplateFieldCreated = "created"
 	PodSecurityPolicyTemplateFieldCreatorID = "creatorId"
@@ -17,6 +19,7 @@ const (
 	PodSecurityPolicyTemplateFieldDefaultAllowPrivilegeEscalation = "defaultAllowPrivilegeEscalation"
 	PodSecurityPolicyTemplateFieldDescription = "description"
 	PodSecurityPolicyTemplateFieldFSGroup = "fsGroup"
+	PodSecurityPolicyTemplateFieldForbiddenSysctls = "forbiddenSysctls"
 	PodSecurityPolicyTemplateFieldHostIPC = "hostIPC"
 	PodSecurityPolicyTemplateFieldHostNetwork = "hostNetwork"
 	PodSecurityPolicyTemplateFieldHostPID = "hostPID"
@@ -41,6 +44,8 @@ type PodSecurityPolicyTemplate struct {
 	AllowedCapabilities []string `json:"allowedCapabilities,omitempty" yaml:"allowedCapabilities,omitempty"`
 	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" yaml:"allowedFlexVolumes,omitempty"`
 	AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" yaml:"allowedHostPaths,omitempty"`
+	AllowedProcMountTypes []string `json:"allowedProcMountTypes,omitempty" yaml:"allowedProcMountTypes,omitempty"`
+	AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" yaml:"allowedUnsafeSysctls,omitempty"`
 	Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
 	Created string `json:"created,omitempty" yaml:"created,omitempty"`
 	CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"`
@@ -48,6 +53,7 @@ type PodSecurityPolicyTemplate struct {
 	DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" yaml:"defaultAllowPrivilegeEscalation,omitempty"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty"`
 	FSGroup *FSGroupStrategyOptions `json:"fsGroup,omitempty" yaml:"fsGroup,omitempty"`
+	ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" yaml:"forbiddenSysctls,omitempty"`
 	HostIPC bool `json:"hostIPC,omitempty" yaml:"hostIPC,omitempty"`
 	HostNetwork bool `json:"hostNetwork,omitempty" yaml:"hostNetwork,omitempty"`
 	HostPID bool `json:"hostPID,omitempty" yaml:"hostPID,omitempty"`
yaml:"readOnly,omitempty"` + SecretRef *LocalObjectReference `json:"secretRef,omitempty" yaml:"secretRef,omitempty"` + VolumeID string `json:"volumeID,omitempty" yaml:"volumeID,omitempty"` } diff --git a/client/project/v3/zz_generated_container.go b/client/project/v3/zz_generated_container.go index 4e76cc77..18f41d13 100644 --- a/client/project/v3/zz_generated_container.go +++ b/client/project/v3/zz_generated_container.go @@ -19,6 +19,7 @@ const ( ContainerFieldPostStart = "postStart" ContainerFieldPreStop = "preStop" ContainerFieldPrivileged = "privileged" + ContainerFieldProcMount = "procMount" ContainerFieldReadOnly = "readOnly" ContainerFieldReadinessProbe = "readinessProbe" ContainerFieldResources = "resources" @@ -57,6 +58,7 @@ type Container struct { PostStart *Handler `json:"postStart,omitempty" yaml:"postStart,omitempty"` PreStop *Handler `json:"preStop,omitempty" yaml:"preStop,omitempty"` Privileged *bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` + ProcMount string `json:"procMount,omitempty" yaml:"procMount,omitempty"` ReadOnly *bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"` ReadinessProbe *Probe `json:"readinessProbe,omitempty" yaml:"readinessProbe,omitempty"` Resources *ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` diff --git a/client/project/v3/zz_generated_cron_job.go b/client/project/v3/zz_generated_cron_job.go index b1619fd2..3dcb2aa7 100644 --- a/client/project/v3/zz_generated_cron_job.go +++ b/client/project/v3/zz_generated_cron_job.go @@ -33,10 +33,12 @@ const ( CronJobFieldPriorityClassName = "priorityClassName" CronJobFieldProjectID = "projectId" CronJobFieldPublicEndpoints = "publicEndpoints" + CronJobFieldReadinessGates = "readinessGates" CronJobFieldRemoved = "removed" CronJobFieldRestartPolicy = "restartPolicy" CronJobFieldRunAsGroup = "runAsGroup" CronJobFieldRunAsNonRoot = "runAsNonRoot" + CronJobFieldRuntimeClassName = "runtimeClassName" CronJobFieldSchedulerName = "schedulerName" CronJobFieldScheduling = "scheduling" CronJobFieldSelector = "selector" @@ -44,6 +46,8 @@ const ( CronJobFieldShareProcessNamespace = "shareProcessNamespace" CronJobFieldState = "state" CronJobFieldSubdomain = "subdomain" + CronJobFieldSysctls = "sysctls" + CronJobFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished" CronJobFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" CronJobFieldTransitioning = "transitioning" CronJobFieldTransitioningMessage = "transitioningMessage" @@ -83,10 +87,12 @@ type CronJob struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` Selector *LabelSelector `json:"selector,omitempty" 
yaml:"selector,omitempty"` @@ -94,6 +100,8 @@ type CronJob struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` + TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_cron_job_spec.go b/client/project/v3/zz_generated_cron_job_spec.go index 6b239194..259e4709 100644 --- a/client/project/v3/zz_generated_cron_job_spec.go +++ b/client/project/v3/zz_generated_cron_job_spec.go @@ -20,15 +20,19 @@ const ( CronJobSpecFieldObjectMeta = "metadata" CronJobSpecFieldPriority = "priority" CronJobSpecFieldPriorityClassName = "priorityClassName" + CronJobSpecFieldReadinessGates = "readinessGates" CronJobSpecFieldRestartPolicy = "restartPolicy" CronJobSpecFieldRunAsGroup = "runAsGroup" CronJobSpecFieldRunAsNonRoot = "runAsNonRoot" + CronJobSpecFieldRuntimeClassName = "runtimeClassName" CronJobSpecFieldSchedulerName = "schedulerName" CronJobSpecFieldScheduling = "scheduling" CronJobSpecFieldSelector = "selector" CronJobSpecFieldServiceAccountName = "serviceAccountName" CronJobSpecFieldShareProcessNamespace = "shareProcessNamespace" CronJobSpecFieldSubdomain = "subdomain" + CronJobSpecFieldSysctls = "sysctls" + CronJobSpecFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished" CronJobSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" CronJobSpecFieldUid = "uid" CronJobSpecFieldVolumes = "volumes" @@ -53,15 +57,19 @@ type CronJobSpec struct { ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` + TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" 
yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_daemon_set.go b/client/project/v3/zz_generated_daemon_set.go index 122b1c48..17a25c73 100644 --- a/client/project/v3/zz_generated_daemon_set.go +++ b/client/project/v3/zz_generated_daemon_set.go @@ -33,10 +33,12 @@ const ( DaemonSetFieldPriorityClassName = "priorityClassName" DaemonSetFieldProjectID = "projectId" DaemonSetFieldPublicEndpoints = "publicEndpoints" + DaemonSetFieldReadinessGates = "readinessGates" DaemonSetFieldRemoved = "removed" DaemonSetFieldRestartPolicy = "restartPolicy" DaemonSetFieldRunAsGroup = "runAsGroup" DaemonSetFieldRunAsNonRoot = "runAsNonRoot" + DaemonSetFieldRuntimeClassName = "runtimeClassName" DaemonSetFieldSchedulerName = "schedulerName" DaemonSetFieldScheduling = "scheduling" DaemonSetFieldSelector = "selector" @@ -44,6 +46,7 @@ const ( DaemonSetFieldShareProcessNamespace = "shareProcessNamespace" DaemonSetFieldState = "state" DaemonSetFieldSubdomain = "subdomain" + DaemonSetFieldSysctls = "sysctls" DaemonSetFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" DaemonSetFieldTransitioning = "transitioning" DaemonSetFieldTransitioningMessage = "transitioningMessage" @@ -83,10 +86,12 @@ type DaemonSet struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"` @@ -94,6 +99,7 @@ type DaemonSet struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_daemon_set_spec.go b/client/project/v3/zz_generated_daemon_set_spec.go index 861ee689..5249e415 100644 --- a/client/project/v3/zz_generated_daemon_set_spec.go +++ b/client/project/v3/zz_generated_daemon_set_spec.go @@ -20,15 +20,18 @@ const ( DaemonSetSpecFieldObjectMeta = "metadata" DaemonSetSpecFieldPriority = "priority" DaemonSetSpecFieldPriorityClassName = "priorityClassName" + DaemonSetSpecFieldReadinessGates 
= "readinessGates" DaemonSetSpecFieldRestartPolicy = "restartPolicy" DaemonSetSpecFieldRunAsGroup = "runAsGroup" DaemonSetSpecFieldRunAsNonRoot = "runAsNonRoot" + DaemonSetSpecFieldRuntimeClassName = "runtimeClassName" DaemonSetSpecFieldSchedulerName = "schedulerName" DaemonSetSpecFieldScheduling = "scheduling" DaemonSetSpecFieldSelector = "selector" DaemonSetSpecFieldServiceAccountName = "serviceAccountName" DaemonSetSpecFieldShareProcessNamespace = "shareProcessNamespace" DaemonSetSpecFieldSubdomain = "subdomain" + DaemonSetSpecFieldSysctls = "sysctls" DaemonSetSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" DaemonSetSpecFieldUid = "uid" DaemonSetSpecFieldVolumes = "volumes" @@ -53,15 +56,18 @@ type DaemonSetSpec struct { ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_deployment.go b/client/project/v3/zz_generated_deployment.go index ceac49c7..63b8126b 100644 --- a/client/project/v3/zz_generated_deployment.go +++ b/client/project/v3/zz_generated_deployment.go @@ -34,10 +34,12 @@ const ( DeploymentFieldPriorityClassName = "priorityClassName" DeploymentFieldProjectID = "projectId" DeploymentFieldPublicEndpoints = "publicEndpoints" + DeploymentFieldReadinessGates = "readinessGates" DeploymentFieldRemoved = "removed" DeploymentFieldRestartPolicy = "restartPolicy" DeploymentFieldRunAsGroup = "runAsGroup" DeploymentFieldRunAsNonRoot = "runAsNonRoot" + DeploymentFieldRuntimeClassName = "runtimeClassName" DeploymentFieldScale = "scale" DeploymentFieldSchedulerName = "schedulerName" DeploymentFieldScheduling = "scheduling" @@ -46,6 +48,7 @@ const ( DeploymentFieldShareProcessNamespace = "shareProcessNamespace" DeploymentFieldState = "state" DeploymentFieldSubdomain = "subdomain" + DeploymentFieldSysctls = "sysctls" DeploymentFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" DeploymentFieldTransitioning = "transitioning" DeploymentFieldTransitioningMessage = "transitioningMessage" @@ -86,10 +89,12 @@ type Deployment struct { PriorityClassName string 
`json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -98,6 +103,7 @@ type Deployment struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_deployment_spec.go b/client/project/v3/zz_generated_deployment_spec.go index 7ae41410..f15d5040 100644 --- a/client/project/v3/zz_generated_deployment_spec.go +++ b/client/project/v3/zz_generated_deployment_spec.go @@ -21,9 +21,11 @@ const ( DeploymentSpecFieldPaused = "paused" DeploymentSpecFieldPriority = "priority" DeploymentSpecFieldPriorityClassName = "priorityClassName" + DeploymentSpecFieldReadinessGates = "readinessGates" DeploymentSpecFieldRestartPolicy = "restartPolicy" DeploymentSpecFieldRunAsGroup = "runAsGroup" DeploymentSpecFieldRunAsNonRoot = "runAsNonRoot" + DeploymentSpecFieldRuntimeClassName = "runtimeClassName" DeploymentSpecFieldScale = "scale" DeploymentSpecFieldSchedulerName = "schedulerName" DeploymentSpecFieldScheduling = "scheduling" @@ -31,6 +33,7 @@ const ( DeploymentSpecFieldServiceAccountName = "serviceAccountName" DeploymentSpecFieldShareProcessNamespace = "shareProcessNamespace" DeploymentSpecFieldSubdomain = "subdomain" + DeploymentSpecFieldSysctls = "sysctls" DeploymentSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" DeploymentSpecFieldUid = "uid" DeploymentSpecFieldVolumes = "volumes" @@ -56,9 +59,11 @@ type DeploymentSpec struct { Paused bool `json:"paused,omitempty" yaml:"paused,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" 
yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -66,6 +71,7 @@ type DeploymentSpec struct { ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_job.go b/client/project/v3/zz_generated_job.go index ffb2834c..e4cbae8b 100644 --- a/client/project/v3/zz_generated_job.go +++ b/client/project/v3/zz_generated_job.go @@ -33,10 +33,12 @@ const ( JobFieldPriorityClassName = "priorityClassName" JobFieldProjectID = "projectId" JobFieldPublicEndpoints = "publicEndpoints" + JobFieldReadinessGates = "readinessGates" JobFieldRemoved = "removed" JobFieldRestartPolicy = "restartPolicy" JobFieldRunAsGroup = "runAsGroup" JobFieldRunAsNonRoot = "runAsNonRoot" + JobFieldRuntimeClassName = "runtimeClassName" JobFieldSchedulerName = "schedulerName" JobFieldScheduling = "scheduling" JobFieldSelector = "selector" @@ -44,6 +46,8 @@ const ( JobFieldShareProcessNamespace = "shareProcessNamespace" JobFieldState = "state" JobFieldSubdomain = "subdomain" + JobFieldSysctls = "sysctls" + JobFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished" JobFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" JobFieldTransitioning = "transitioning" JobFieldTransitioningMessage = "transitioningMessage" @@ -83,10 +87,12 @@ type Job struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"` @@ -94,6 +100,8 @@ type Job struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` + TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"` 
 TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"`
 Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"`
 TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"`
diff --git a/client/project/v3/zz_generated_job_spec.go b/client/project/v3/zz_generated_job_spec.go
index 116b0928..a60f6a73 100644
--- a/client/project/v3/zz_generated_job_spec.go
+++ b/client/project/v3/zz_generated_job_spec.go
@@ -20,15 +20,19 @@ const (
 JobSpecFieldObjectMeta = "metadata"
 JobSpecFieldPriority = "priority"
 JobSpecFieldPriorityClassName = "priorityClassName"
+ JobSpecFieldReadinessGates = "readinessGates"
 JobSpecFieldRestartPolicy = "restartPolicy"
 JobSpecFieldRunAsGroup = "runAsGroup"
 JobSpecFieldRunAsNonRoot = "runAsNonRoot"
+ JobSpecFieldRuntimeClassName = "runtimeClassName"
 JobSpecFieldSchedulerName = "schedulerName"
 JobSpecFieldScheduling = "scheduling"
 JobSpecFieldSelector = "selector"
 JobSpecFieldServiceAccountName = "serviceAccountName"
 JobSpecFieldShareProcessNamespace = "shareProcessNamespace"
 JobSpecFieldSubdomain = "subdomain"
+ JobSpecFieldSysctls = "sysctls"
+ JobSpecFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished"
 JobSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds"
 JobSpecFieldUid = "uid"
 JobSpecFieldVolumes = "volumes"
@@ -53,15 +57,19 @@ type JobSpec struct {
 ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
 Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"`
 PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"`
+ ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"`
 RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
 RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"`
 RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"`
+ RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"`
 SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"`
 Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"`
 Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"`
 ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"`
 ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"`
 Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"`
+ Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"`
+ TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"`
 TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"`
 Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"`
 Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"`
diff --git a/client/project/v3/zz_generated_job_template_spec.go b/client/project/v3/zz_generated_job_template_spec.go
index 746ce1bb..ae10d19e 100644
--- a/client/project/v3/zz_generated_job_template_spec.go
+++ b/client/project/v3/zz_generated_job_template_spec.go
@@ -21,15 +21,19 @@ const (
 JobTemplateSpecFieldObjectMeta = "metadata"
 JobTemplateSpecFieldPriority = "priority"
 JobTemplateSpecFieldPriorityClassName = "priorityClassName"
+ JobTemplateSpecFieldReadinessGates = "readinessGates"
 JobTemplateSpecFieldRestartPolicy = "restartPolicy"
 JobTemplateSpecFieldRunAsGroup = "runAsGroup"
 JobTemplateSpecFieldRunAsNonRoot = "runAsNonRoot"
+ JobTemplateSpecFieldRuntimeClassName = "runtimeClassName"
 JobTemplateSpecFieldSchedulerName = "schedulerName"
 JobTemplateSpecFieldScheduling = "scheduling"
 JobTemplateSpecFieldSelector = "selector"
 JobTemplateSpecFieldServiceAccountName = "serviceAccountName"
 JobTemplateSpecFieldShareProcessNamespace = "shareProcessNamespace"
 JobTemplateSpecFieldSubdomain = "subdomain"
+ JobTemplateSpecFieldSysctls = "sysctls"
+ JobTemplateSpecFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished"
 JobTemplateSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds"
 JobTemplateSpecFieldUid = "uid"
 JobTemplateSpecFieldVolumes = "volumes"
@@ -55,15 +59,19 @@ type JobTemplateSpec struct {
 ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
 Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"`
 PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"`
+ ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"`
 RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
 RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"`
 RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"`
+ RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"`
 SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"`
 Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"`
 Selector *LabelSelector `json:"selector,omitempty" yaml:"selector,omitempty"`
 ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"`
 ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"`
 Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"`
+ Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"`
+ TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"`
 TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"`
 Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"`
 Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"`
diff --git a/client/project/v3/zz_generated_local_volume_source.go b/client/project/v3/zz_generated_local_volume_source.go
index 83eaec3b..6dea350b 100644
--- a/client/project/v3/zz_generated_local_volume_source.go
+++ b/client/project/v3/zz_generated_local_volume_source.go
@@ -1,10 +1,12 @@
 package client

 const (
- LocalVolumeSourceType = "localVolumeSource"
- LocalVolumeSourceFieldPath = "path"
+ LocalVolumeSourceType = "localVolumeSource"
+ LocalVolumeSourceFieldFSType = "fsType"
+ LocalVolumeSourceFieldPath = "path"
 )

 type LocalVolumeSource struct {
- Path string `json:"path,omitempty" yaml:"path,omitempty"`
+ FSType string `json:"fsType,omitempty" yaml:"fsType,omitempty"`
+ Path string `json:"path,omitempty" yaml:"path,omitempty"`
 }
diff --git a/client/project/v3/zz_generated_node_selector_term.go b/client/project/v3/zz_generated_node_selector_term.go
index 648cf8e3..11bf2562 100644
--- a/client/project/v3/zz_generated_node_selector_term.go
+++ b/client/project/v3/zz_generated_node_selector_term.go
@@ -3,8 +3,10 @@ package client
 const (
 NodeSelectorTermType = "nodeSelectorTerm"
 NodeSelectorTermFieldMatchExpressions = "matchExpressions"
+ NodeSelectorTermFieldMatchFields = "matchFields"
 )

 type NodeSelectorTerm struct {
 MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" yaml:"matchExpressions,omitempty"`
+ MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" yaml:"matchFields,omitempty"`
 }
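Review note: `matchFields` complements `matchExpressions` on NodeSelectorTerm. NodeSelectorRequirement itself is not touched by this diff, so the sketch below assumes it mirrors the upstream Kubernetes type with Key/Operator/Values; upstream only accepts metadata.name as a field key here, and the node name is a placeholder:

    term := client.NodeSelectorTerm{
    	MatchFields: []client.NodeSelectorRequirement{{
    		Key:      "metadata.name", // the only field key Kubernetes supports
    		Operator: "In",
    		Values:   []string{"node-1"}, // hypothetical node name
    	}},
    }
    _ = term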
`json:"volumeMode,omitempty" yaml:"volumeMode,omitempty"` } diff --git a/client/project/v3/zz_generated_persistent_volume_spec.go b/client/project/v3/zz_generated_persistent_volume_spec.go index 54643bf6..a1e6529a 100644 --- a/client/project/v3/zz_generated_persistent_volume_spec.go +++ b/client/project/v3/zz_generated_persistent_volume_spec.go @@ -42,7 +42,7 @@ type PersistentVolumeSpec struct { CSI *CSIPersistentVolumeSource `json:"csi,omitempty" yaml:"csi,omitempty"` Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"` CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" yaml:"cephfs,omitempty"` - Cinder *CinderVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"` + Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" yaml:"cinder,omitempty"` ClaimRef *ObjectReference `json:"claimRef,omitempty" yaml:"claimRef,omitempty"` FC *FCVolumeSource `json:"fc,omitempty" yaml:"fc,omitempty"` FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" yaml:"flexVolume,omitempty"` diff --git a/client/project/v3/zz_generated_pod.go b/client/project/v3/zz_generated_pod.go index b95a1200..245ceea9 100644 --- a/client/project/v3/zz_generated_pod.go +++ b/client/project/v3/zz_generated_pod.go @@ -32,10 +32,12 @@ const ( PodFieldPriorityClassName = "priorityClassName" PodFieldProjectID = "projectId" PodFieldPublicEndpoints = "publicEndpoints" + PodFieldReadinessGates = "readinessGates" PodFieldRemoved = "removed" PodFieldRestartPolicy = "restartPolicy" PodFieldRunAsGroup = "runAsGroup" PodFieldRunAsNonRoot = "runAsNonRoot" + PodFieldRuntimeClassName = "runtimeClassName" PodFieldSchedulerName = "schedulerName" PodFieldScheduling = "scheduling" PodFieldServiceAccountName = "serviceAccountName" @@ -43,6 +45,7 @@ const ( PodFieldState = "state" PodFieldStatus = "status" PodFieldSubdomain = "subdomain" + PodFieldSysctls = "sysctls" PodFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" PodFieldTransitioning = "transitioning" PodFieldTransitioningMessage = "transitioningMessage" @@ -80,10 +83,12 @@ type Pod struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` @@ -91,6 +96,7 @@ type Pod struct { State string `json:"state,omitempty" yaml:"state,omitempty"` Status *PodStatus `json:"status,omitempty" yaml:"status,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" 
yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_pod_readiness_gate.go b/client/project/v3/zz_generated_pod_readiness_gate.go new file mode 100644 index 00000000..5909a4db --- /dev/null +++ b/client/project/v3/zz_generated_pod_readiness_gate.go @@ -0,0 +1,10 @@ +package client + +const ( + PodReadinessGateType = "podReadinessGate" + PodReadinessGateFieldConditionType = "conditionType" +) + +type PodReadinessGate struct { + ConditionType string `json:"conditionType,omitempty" yaml:"conditionType,omitempty"` +} diff --git a/client/project/v3/zz_generated_pod_security_context.go b/client/project/v3/zz_generated_pod_security_context.go index b49f9d6d..66d425ac 100644 --- a/client/project/v3/zz_generated_pod_security_context.go +++ b/client/project/v3/zz_generated_pod_security_context.go @@ -6,13 +6,15 @@ const ( PodSecurityContextFieldGids = "gids" PodSecurityContextFieldRunAsGroup = "runAsGroup" PodSecurityContextFieldRunAsNonRoot = "runAsNonRoot" + PodSecurityContextFieldSysctls = "sysctls" PodSecurityContextFieldUid = "uid" ) type PodSecurityContext struct { - Fsgid *int64 `json:"fsgid,omitempty" yaml:"fsgid,omitempty"` - Gids []int64 `json:"gids,omitempty" yaml:"gids,omitempty"` - RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` - Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` + Fsgid *int64 `json:"fsgid,omitempty" yaml:"fsgid,omitempty"` + Gids []int64 `json:"gids,omitempty" yaml:"gids,omitempty"` + RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` + Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` } diff --git a/client/project/v3/zz_generated_pod_spec.go b/client/project/v3/zz_generated_pod_spec.go index ab9d892d..80c16edf 100644 --- a/client/project/v3/zz_generated_pod_spec.go +++ b/client/project/v3/zz_generated_pod_spec.go @@ -18,14 +18,17 @@ const ( PodSpecFieldNodeID = "nodeId" PodSpecFieldPriority = "priority" PodSpecFieldPriorityClassName = "priorityClassName" + PodSpecFieldReadinessGates = "readinessGates" PodSpecFieldRestartPolicy = "restartPolicy" PodSpecFieldRunAsGroup = "runAsGroup" PodSpecFieldRunAsNonRoot = "runAsNonRoot" + PodSpecFieldRuntimeClassName = "runtimeClassName" PodSpecFieldSchedulerName = "schedulerName" PodSpecFieldScheduling = "scheduling" PodSpecFieldServiceAccountName = "serviceAccountName" PodSpecFieldShareProcessNamespace = "shareProcessNamespace" PodSpecFieldSubdomain = "subdomain" + PodSpecFieldSysctls = "sysctls" PodSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" PodSpecFieldUid = "uid" PodSpecFieldVolumes = "volumes" @@ -48,14 +51,17 @@ type PodSpec struct { NodeID string `json:"nodeId,omitempty" yaml:"nodeId,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 
`json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_pod_template_spec.go b/client/project/v3/zz_generated_pod_template_spec.go index 2b8cc98d..6082096f 100644 --- a/client/project/v3/zz_generated_pod_template_spec.go +++ b/client/project/v3/zz_generated_pod_template_spec.go @@ -19,14 +19,17 @@ const ( PodTemplateSpecFieldObjectMeta = "metadata" PodTemplateSpecFieldPriority = "priority" PodTemplateSpecFieldPriorityClassName = "priorityClassName" + PodTemplateSpecFieldReadinessGates = "readinessGates" PodTemplateSpecFieldRestartPolicy = "restartPolicy" PodTemplateSpecFieldRunAsGroup = "runAsGroup" PodTemplateSpecFieldRunAsNonRoot = "runAsNonRoot" + PodTemplateSpecFieldRuntimeClassName = "runtimeClassName" PodTemplateSpecFieldSchedulerName = "schedulerName" PodTemplateSpecFieldScheduling = "scheduling" PodTemplateSpecFieldServiceAccountName = "serviceAccountName" PodTemplateSpecFieldShareProcessNamespace = "shareProcessNamespace" PodTemplateSpecFieldSubdomain = "subdomain" + PodTemplateSpecFieldSysctls = "sysctls" PodTemplateSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" PodTemplateSpecFieldUid = "uid" PodTemplateSpecFieldVolumes = "volumes" @@ -50,14 +53,17 @@ type PodTemplateSpec struct { ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" 
yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_replica_set.go b/client/project/v3/zz_generated_replica_set.go index ccbfebb4..af1e66d2 100644 --- a/client/project/v3/zz_generated_replica_set.go +++ b/client/project/v3/zz_generated_replica_set.go @@ -31,12 +31,14 @@ const ( ReplicaSetFieldPriorityClassName = "priorityClassName" ReplicaSetFieldProjectID = "projectId" ReplicaSetFieldPublicEndpoints = "publicEndpoints" + ReplicaSetFieldReadinessGates = "readinessGates" ReplicaSetFieldRemoved = "removed" ReplicaSetFieldReplicaSetConfig = "replicaSetConfig" ReplicaSetFieldReplicaSetStatus = "replicaSetStatus" ReplicaSetFieldRestartPolicy = "restartPolicy" ReplicaSetFieldRunAsGroup = "runAsGroup" ReplicaSetFieldRunAsNonRoot = "runAsNonRoot" + ReplicaSetFieldRuntimeClassName = "runtimeClassName" ReplicaSetFieldScale = "scale" ReplicaSetFieldSchedulerName = "schedulerName" ReplicaSetFieldScheduling = "scheduling" @@ -45,6 +47,7 @@ const ( ReplicaSetFieldShareProcessNamespace = "shareProcessNamespace" ReplicaSetFieldState = "state" ReplicaSetFieldSubdomain = "subdomain" + ReplicaSetFieldSysctls = "sysctls" ReplicaSetFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" ReplicaSetFieldTransitioning = "transitioning" ReplicaSetFieldTransitioningMessage = "transitioningMessage" @@ -82,12 +85,14 @@ type ReplicaSet struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` ReplicaSetConfig *ReplicaSetConfig `json:"replicaSetConfig,omitempty" yaml:"replicaSetConfig,omitempty"` ReplicaSetStatus *ReplicaSetStatus `json:"replicaSetStatus,omitempty" yaml:"replicaSetStatus,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -96,6 +101,7 @@ type ReplicaSet struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_replication_controller.go b/client/project/v3/zz_generated_replication_controller.go index 
a3f815fc..fa3f0072 100644 --- a/client/project/v3/zz_generated_replication_controller.go +++ b/client/project/v3/zz_generated_replication_controller.go @@ -31,12 +31,14 @@ const ( ReplicationControllerFieldPriorityClassName = "priorityClassName" ReplicationControllerFieldProjectID = "projectId" ReplicationControllerFieldPublicEndpoints = "publicEndpoints" + ReplicationControllerFieldReadinessGates = "readinessGates" ReplicationControllerFieldRemoved = "removed" ReplicationControllerFieldReplicationControllerConfig = "replicationControllerConfig" ReplicationControllerFieldReplicationControllerStatus = "replicationControllerStatus" ReplicationControllerFieldRestartPolicy = "restartPolicy" ReplicationControllerFieldRunAsGroup = "runAsGroup" ReplicationControllerFieldRunAsNonRoot = "runAsNonRoot" + ReplicationControllerFieldRuntimeClassName = "runtimeClassName" ReplicationControllerFieldScale = "scale" ReplicationControllerFieldSchedulerName = "schedulerName" ReplicationControllerFieldScheduling = "scheduling" @@ -45,6 +47,7 @@ const ( ReplicationControllerFieldShareProcessNamespace = "shareProcessNamespace" ReplicationControllerFieldState = "state" ReplicationControllerFieldSubdomain = "subdomain" + ReplicationControllerFieldSysctls = "sysctls" ReplicationControllerFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" ReplicationControllerFieldTransitioning = "transitioning" ReplicationControllerFieldTransitioningMessage = "transitioningMessage" @@ -82,12 +85,14 @@ type ReplicationController struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` ReplicationControllerConfig *ReplicationControllerConfig `json:"replicationControllerConfig,omitempty" yaml:"replicationControllerConfig,omitempty"` ReplicationControllerStatus *ReplicationControllerStatus `json:"replicationControllerStatus,omitempty" yaml:"replicationControllerStatus,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -96,6 +101,7 @@ type ReplicationController struct { ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git 
a/client/project/v3/zz_generated_replication_controller_spec.go b/client/project/v3/zz_generated_replication_controller_spec.go index c7c186a5..666590a0 100644 --- a/client/project/v3/zz_generated_replication_controller_spec.go +++ b/client/project/v3/zz_generated_replication_controller_spec.go @@ -19,10 +19,12 @@ const ( ReplicationControllerSpecFieldObjectMeta = "metadata" ReplicationControllerSpecFieldPriority = "priority" ReplicationControllerSpecFieldPriorityClassName = "priorityClassName" + ReplicationControllerSpecFieldReadinessGates = "readinessGates" ReplicationControllerSpecFieldReplicationControllerConfig = "replicationControllerConfig" ReplicationControllerSpecFieldRestartPolicy = "restartPolicy" ReplicationControllerSpecFieldRunAsGroup = "runAsGroup" ReplicationControllerSpecFieldRunAsNonRoot = "runAsNonRoot" + ReplicationControllerSpecFieldRuntimeClassName = "runtimeClassName" ReplicationControllerSpecFieldScale = "scale" ReplicationControllerSpecFieldSchedulerName = "schedulerName" ReplicationControllerSpecFieldScheduling = "scheduling" @@ -30,6 +32,7 @@ const ( ReplicationControllerSpecFieldServiceAccountName = "serviceAccountName" ReplicationControllerSpecFieldShareProcessNamespace = "shareProcessNamespace" ReplicationControllerSpecFieldSubdomain = "subdomain" + ReplicationControllerSpecFieldSysctls = "sysctls" ReplicationControllerSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" ReplicationControllerSpecFieldUid = "uid" ReplicationControllerSpecFieldVolumes = "volumes" @@ -53,10 +56,12 @@ type ReplicationControllerSpec struct { ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` ReplicationControllerConfig *ReplicationControllerConfig `json:"replicationControllerConfig,omitempty" yaml:"replicationControllerConfig,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -64,6 +69,7 @@ type ReplicationControllerSpec struct { ServiceAccountName string `json:"serviceAccountName,omitempty" yaml:"serviceAccountName,omitempty"` ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_security_context.go b/client/project/v3/zz_generated_security_context.go index 0067d78f..cb33356b 100644 --- a/client/project/v3/zz_generated_security_context.go +++ 
b/client/project/v3/zz_generated_security_context.go @@ -6,6 +6,7 @@ const ( SecurityContextFieldCapAdd = "capAdd" SecurityContextFieldCapDrop = "capDrop" SecurityContextFieldPrivileged = "privileged" + SecurityContextFieldProcMount = "procMount" SecurityContextFieldReadOnly = "readOnly" SecurityContextFieldRunAsGroup = "runAsGroup" SecurityContextFieldRunAsNonRoot = "runAsNonRoot" @@ -17,6 +18,7 @@ type SecurityContext struct { CapAdd []string `json:"capAdd,omitempty" yaml:"capAdd,omitempty"` CapDrop []string `json:"capDrop,omitempty" yaml:"capDrop,omitempty"` Privileged *bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` + ProcMount string `json:"procMount,omitempty" yaml:"procMount,omitempty"` ReadOnly *bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` diff --git a/client/project/v3/zz_generated_service_account_token_projection.go b/client/project/v3/zz_generated_service_account_token_projection.go new file mode 100644 index 00000000..7d2e880e --- /dev/null +++ b/client/project/v3/zz_generated_service_account_token_projection.go @@ -0,0 +1,14 @@ +package client + +const ( + ServiceAccountTokenProjectionType = "serviceAccountTokenProjection" + ServiceAccountTokenProjectionFieldAudience = "audience" + ServiceAccountTokenProjectionFieldExpirationSeconds = "expirationSeconds" + ServiceAccountTokenProjectionFieldPath = "path" +) + +type ServiceAccountTokenProjection struct { + Audience string `json:"audience,omitempty" yaml:"audience,omitempty"` + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" yaml:"expirationSeconds,omitempty"` + Path string `json:"path,omitempty" yaml:"path,omitempty"` +} diff --git a/client/project/v3/zz_generated_stateful_set.go b/client/project/v3/zz_generated_stateful_set.go index d2c1313f..873c42f6 100644 --- a/client/project/v3/zz_generated_stateful_set.go +++ b/client/project/v3/zz_generated_stateful_set.go @@ -31,10 +31,12 @@ const ( StatefulSetFieldPriorityClassName = "priorityClassName" StatefulSetFieldProjectID = "projectId" StatefulSetFieldPublicEndpoints = "publicEndpoints" + StatefulSetFieldReadinessGates = "readinessGates" StatefulSetFieldRemoved = "removed" StatefulSetFieldRestartPolicy = "restartPolicy" StatefulSetFieldRunAsGroup = "runAsGroup" StatefulSetFieldRunAsNonRoot = "runAsNonRoot" + StatefulSetFieldRuntimeClassName = "runtimeClassName" StatefulSetFieldScale = "scale" StatefulSetFieldSchedulerName = "schedulerName" StatefulSetFieldScheduling = "scheduling" @@ -45,6 +47,7 @@ const ( StatefulSetFieldStatefulSetConfig = "statefulSetConfig" StatefulSetFieldStatefulSetStatus = "statefulSetStatus" StatefulSetFieldSubdomain = "subdomain" + StatefulSetFieldSysctls = "sysctls" StatefulSetFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" StatefulSetFieldTransitioning = "transitioning" StatefulSetFieldTransitioningMessage = "transitioningMessage" @@ -82,10 +85,12 @@ type StatefulSet struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` RestartPolicy string 
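Review note: ServiceAccountTokenProjection is the client half of bound service account tokens; it plugs into VolumeProjection via the new `serviceAccountToken` member (see the volume_projection diff below). A sketch with the same assumed `client` import and a placeholder audience:

    exp := int64(3600)
    proj := client.VolumeProjection{
    	ServiceAccountToken: &client.ServiceAccountTokenProjection{
    		Audience:          "vault", // hypothetical token audience
    		ExpirationSeconds: &exp,    // token rotated after an hour
    		Path:              "token",
    	},
    }
    _ = proj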
`json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -96,6 +101,7 @@ type StatefulSet struct { StatefulSetConfig *StatefulSetConfig `json:"statefulSetConfig,omitempty" yaml:"statefulSetConfig,omitempty"` StatefulSetStatus *StatefulSetStatus `json:"statefulSetStatus,omitempty" yaml:"statefulSetStatus,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/client/project/v3/zz_generated_stateful_set_spec.go b/client/project/v3/zz_generated_stateful_set_spec.go index 30da6fc5..98dbd757 100644 --- a/client/project/v3/zz_generated_stateful_set_spec.go +++ b/client/project/v3/zz_generated_stateful_set_spec.go @@ -19,9 +19,11 @@ const ( StatefulSetSpecFieldObjectMeta = "metadata" StatefulSetSpecFieldPriority = "priority" StatefulSetSpecFieldPriorityClassName = "priorityClassName" + StatefulSetSpecFieldReadinessGates = "readinessGates" StatefulSetSpecFieldRestartPolicy = "restartPolicy" StatefulSetSpecFieldRunAsGroup = "runAsGroup" StatefulSetSpecFieldRunAsNonRoot = "runAsNonRoot" + StatefulSetSpecFieldRuntimeClassName = "runtimeClassName" StatefulSetSpecFieldScale = "scale" StatefulSetSpecFieldSchedulerName = "schedulerName" StatefulSetSpecFieldScheduling = "scheduling" @@ -30,6 +32,7 @@ const ( StatefulSetSpecFieldShareProcessNamespace = "shareProcessNamespace" StatefulSetSpecFieldStatefulSetConfig = "statefulSetConfig" StatefulSetSpecFieldSubdomain = "subdomain" + StatefulSetSpecFieldSysctls = "sysctls" StatefulSetSpecFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" StatefulSetSpecFieldUid = "uid" StatefulSetSpecFieldVolumes = "volumes" @@ -53,9 +56,11 @@ type StatefulSetSpec struct { ObjectMeta *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` Priority *int64 `json:"priority,omitempty" yaml:"priority,omitempty"` PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -64,6 +69,7 @@ type StatefulSetSpec struct { ShareProcessNamespace *bool 
`json:"shareProcessNamespace,omitempty" yaml:"shareProcessNamespace,omitempty"` StatefulSetConfig *StatefulSetConfig `json:"statefulSetConfig,omitempty" yaml:"statefulSetConfig,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Uid *int64 `json:"uid,omitempty" yaml:"uid,omitempty"` Volumes []Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` diff --git a/client/project/v3/zz_generated_sysctl.go b/client/project/v3/zz_generated_sysctl.go new file mode 100644 index 00000000..ff629d83 --- /dev/null +++ b/client/project/v3/zz_generated_sysctl.go @@ -0,0 +1,12 @@ +package client + +const ( + SysctlType = "sysctl" + SysctlFieldName = "name" + SysctlFieldValue = "value" +) + +type Sysctl struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Value string `json:"value,omitempty" yaml:"value,omitempty"` +} diff --git a/client/project/v3/zz_generated_typed_local_object_reference.go b/client/project/v3/zz_generated_typed_local_object_reference.go new file mode 100644 index 00000000..0de2ebea --- /dev/null +++ b/client/project/v3/zz_generated_typed_local_object_reference.go @@ -0,0 +1,14 @@ +package client + +const ( + TypedLocalObjectReferenceType = "typedLocalObjectReference" + TypedLocalObjectReferenceFieldAPIGroup = "apiGroup" + TypedLocalObjectReferenceFieldKind = "kind" + TypedLocalObjectReferenceFieldName = "name" +) + +type TypedLocalObjectReference struct { + APIGroup string `json:"apiGroup,omitempty" yaml:"apiGroup,omitempty"` + Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} diff --git a/client/project/v3/zz_generated_volume_projection.go b/client/project/v3/zz_generated_volume_projection.go index fce7e219..69054b84 100644 --- a/client/project/v3/zz_generated_volume_projection.go +++ b/client/project/v3/zz_generated_volume_projection.go @@ -1,14 +1,16 @@ package client const ( - VolumeProjectionType = "volumeProjection" - VolumeProjectionFieldConfigMap = "configMap" - VolumeProjectionFieldDownwardAPI = "downwardAPI" - VolumeProjectionFieldSecret = "secret" + VolumeProjectionType = "volumeProjection" + VolumeProjectionFieldConfigMap = "configMap" + VolumeProjectionFieldDownwardAPI = "downwardAPI" + VolumeProjectionFieldSecret = "secret" + VolumeProjectionFieldServiceAccountToken = "serviceAccountToken" ) type VolumeProjection struct { - ConfigMap *ConfigMapProjection `json:"configMap,omitempty" yaml:"configMap,omitempty"` - DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" yaml:"downwardAPI,omitempty"` - Secret *SecretProjection `json:"secret,omitempty" yaml:"secret,omitempty"` + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" yaml:"configMap,omitempty"` + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" yaml:"downwardAPI,omitempty"` + Secret *SecretProjection `json:"secret,omitempty" yaml:"secret,omitempty"` + ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" yaml:"serviceAccountToken,omitempty"` } diff --git a/client/project/v3/zz_generated_workload.go b/client/project/v3/zz_generated_workload.go index 99c13a16..55783bc3 100644 --- a/client/project/v3/zz_generated_workload.go +++ b/client/project/v3/zz_generated_workload.go @@ -40,6 +40,7 @@ const ( WorkloadFieldPriorityClassName = 
"priorityClassName" WorkloadFieldProjectID = "projectId" WorkloadFieldPublicEndpoints = "publicEndpoints" + WorkloadFieldReadinessGates = "readinessGates" WorkloadFieldRemoved = "removed" WorkloadFieldReplicaSetConfig = "replicaSetConfig" WorkloadFieldReplicaSetStatus = "replicaSetStatus" @@ -48,6 +49,7 @@ const ( WorkloadFieldRestartPolicy = "restartPolicy" WorkloadFieldRunAsGroup = "runAsGroup" WorkloadFieldRunAsNonRoot = "runAsNonRoot" + WorkloadFieldRuntimeClassName = "runtimeClassName" WorkloadFieldScale = "scale" WorkloadFieldSchedulerName = "schedulerName" WorkloadFieldScheduling = "scheduling" @@ -58,6 +60,8 @@ const ( WorkloadFieldStatefulSetConfig = "statefulSetConfig" WorkloadFieldStatefulSetStatus = "statefulSetStatus" WorkloadFieldSubdomain = "subdomain" + WorkloadFieldSysctls = "sysctls" + WorkloadFieldTTLSecondsAfterFinished = "ttlSecondsAfterFinished" WorkloadFieldTerminationGracePeriodSeconds = "terminationGracePeriodSeconds" WorkloadFieldTransitioning = "transitioning" WorkloadFieldTransitioningMessage = "transitioningMessage" @@ -104,6 +108,7 @@ type Workload struct { PriorityClassName string `json:"priorityClassName,omitempty" yaml:"priorityClassName,omitempty"` ProjectID string `json:"projectId,omitempty" yaml:"projectId,omitempty"` PublicEndpoints []PublicEndpoint `json:"publicEndpoints,omitempty" yaml:"publicEndpoints,omitempty"` + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" yaml:"readinessGates,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` ReplicaSetConfig *ReplicaSetConfig `json:"replicaSetConfig,omitempty" yaml:"replicaSetConfig,omitempty"` ReplicaSetStatus *ReplicaSetStatus `json:"replicaSetStatus,omitempty" yaml:"replicaSetStatus,omitempty"` @@ -112,6 +117,7 @@ type Workload struct { RestartPolicy string `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"` RunAsGroup *int64 `json:"runAsGroup,omitempty" yaml:"runAsGroup,omitempty"` RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" yaml:"runAsNonRoot,omitempty"` + RuntimeClassName string `json:"runtimeClassName,omitempty" yaml:"runtimeClassName,omitempty"` Scale *int64 `json:"scale,omitempty" yaml:"scale,omitempty"` SchedulerName string `json:"schedulerName,omitempty" yaml:"schedulerName,omitempty"` Scheduling *Scheduling `json:"scheduling,omitempty" yaml:"scheduling,omitempty"` @@ -122,6 +128,8 @@ type Workload struct { StatefulSetConfig *StatefulSetConfig `json:"statefulSetConfig,omitempty" yaml:"statefulSetConfig,omitempty"` StatefulSetStatus *StatefulSetStatus `json:"statefulSetStatus,omitempty" yaml:"statefulSetStatus,omitempty"` Subdomain string `json:"subdomain,omitempty" yaml:"subdomain,omitempty"` + Sysctls []Sysctl `json:"sysctls,omitempty" yaml:"sysctls,omitempty"` + TTLSecondsAfterFinished *int64 `json:"ttlSecondsAfterFinished,omitempty" yaml:"ttlSecondsAfterFinished,omitempty"` TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" yaml:"terminationGracePeriodSeconds,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"` diff --git a/config/context.go b/config/context.go index a4841d16..9854195c 100644 --- a/config/context.go +++ b/config/context.go @@ -4,7 +4,6 @@ import ( "context" "github.com/rancher/norman/controller" - "github.com/rancher/norman/event" "github.com/rancher/norman/objectclient/dynamic" "github.com/rancher/norman/restwatch" 
"github.com/rancher/norman/signal" @@ -26,13 +25,10 @@ import ( "github.com/rancher/types/peermanager" "github.com/rancher/types/user" "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" ) var ( @@ -143,16 +139,12 @@ func (c *ScaledContext) Start(ctx context.Context) error { } type ManagementContext struct { - eventBroadcaster record.EventBroadcaster - ClientGetter proxy.ClientGetter LocalConfig *rest.Config RESTConfig rest.Config UnversionedClient rest.Interface K8sClient kubernetes.Interface APIExtClient clientset.Interface - Events record.EventRecorder - EventLogger event.Logger Schemas *types.Schemas Scheme *runtime.Scheme Dialer dialer.Factory @@ -310,27 +302,12 @@ func NewManagementContext(config rest.Config) (*ManagementContext, error) { managementv3.AddToScheme(context.Scheme) projectv3.AddToScheme(context.Scheme) - context.eventBroadcaster = record.NewBroadcaster() - context.Events = context.eventBroadcaster.NewRecorder(context.Scheme, v1.EventSource{ - Component: "CattleManagementServer", - }) - context.EventLogger = event.NewLogger(context.Events) - return context, err } func (c *ManagementContext) Start(ctx context.Context) error { logrus.Info("Starting management controllers") - watcher := c.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ - Interface: c.K8sClient.CoreV1().Events(""), - }) - - go func() { - <-ctx.Done() - watcher.Stop() - }() - return controller.SyncThenStart(ctx, 50, c.controllers()...) } diff --git a/config/dialer/dialer.go b/config/dialer/dialer.go index 07d220eb..a9058195 100644 --- a/config/dialer/dialer.go +++ b/config/dialer/dialer.go @@ -5,7 +5,6 @@ import "net" type Dialer func(network, address string) (net.Conn, error) type Factory interface { - LocalClusterDialer() Dialer ClusterDialer(clusterName string) (Dialer, error) DockerDialer(clusterName, machineName string) (Dialer, error) NodeDialer(clusterName, machineName string) (Dialer, error) diff --git a/go.mod b/go.mod deleted file mode 100644 index ba0e061b..00000000 --- a/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/rancher/types - -require ( - github.com/pkg/errors v0.8.0 - github.com/rancher/norman v0.0.0-20181010023203-ad4865987ce7 - github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2 - k8s.io/api v0.0.0-20180621150657-6c0bbc3e58fa - k8s.io/apiextensions-apiserver v0.0.0-20180621165922-80db67131e8d - k8s.io/apimachinery v0.0.0-20180619225948-e386b2658ed2 - k8s.io/client-go v2.0.0-alpha.0.0.20180621152933-b0722d92a7c1+incompatible - k8s.io/gengo v0.0.0-20180223161844-01a732e01d00 -) diff --git a/go.sum b/go.sum deleted file mode 100644 index 9826acf5..00000000 --- a/go.sum +++ /dev/null @@ -1,103 +0,0 @@ -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/davecgh/go-spew v1.1.1-0.20170626231645-782f4967f2dc h1:NlbIJbqL8zjb55Vdrsr5uqyVC6/NoUUd2YrLojfE2zI= -github.com/davecgh/go-spew v1.1.1-0.20170626231645-782f4967f2dc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= -github.com/ghodss/yaml 
v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e h1:ago6fNuQ6IhszPsXkeU7qRCyfsIX7L67WDybsAPkLl8= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54 h1:nRNJXiJvemchkOTn0V4U11TZkvacB94gTzbTZbSA7Rw= -github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gorilla/websocket v0.0.0-20150714140627-6eb6ad425a89 h1:f3M+RTnIGEhCF8ynRezzgqxlQ+VBfer6kL61+4/W+v4= -github.com/gorilla/websocket v0.0.0-20150714140627-6eb6ad425a89/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880 h1:OaRuzt9oCKNui8cCskZijoKUwe+aCuuCwvx1ox8FNyw= -github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/kr/pretty v0.0.0-20140812000539-f31442d60e51 h1:kGEU5h0EzkNa+B8Q3e0GlaIocJYB1G6ZpefcceXhfgc= -github.com/kr/pretty v0.0.0-20140812000539-f31442d60e51/go.mod h1:Bvhd+E3laJ0AVkG0c9rmtZcnhV0HQ3+c3YxxqTvc/gA= -github.com/kr/text v0.0.0-20130911015532-6807e777504f h1:JaNmHIV9Eby6srQVWuiQ6n8ko2o/lG6udSRCbFZe1fs= -github.com/kr/text v0.0.0-20130911015532-6807e777504f/go.mod h1:sjUstKUATFIcff4qlB53Kml0wQPtJVc/3fWrmuUmcfA= -github.com/maruel/panicparse v1.1.1/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da h1:ZQGIPjr1iTtUPXZFk8WShqb5G+Qg65VHFLtSvmHh+Mw= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/onsi/ginkgo v1.2.1-0.20170318221715-67b9df7f55fe h1:d3gNxYlRvgsR9X/YxcYc0e0wsFAhC6u5zM51TC+o+EA= -github.com/onsi/ginkgo v1.2.1-0.20170318221715-67b9df7f55fe/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20160911051023-d59fa0ac68bb h1:myDTJUQm/UVMeOHuw47rGP+3Id5b0s0T7EVl71ZweuI= -github.com/onsi/gomega v0.0.0-20160911051023-d59fa0ac68bb/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0 h1:GD+A8+e+wFkqje55/2fOVnZPkoDIu1VooBWfNrnY8Uo= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7 h1:NgR6WN8nQ4SmFC1sSUHY8SriLuWCZ6cCIQtH4vDZN3c= -github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rancher/norman v0.0.0-20180925184916-d675dc79491f h1:HFqoOtSaJM2IV0W0MeqvmA0GUyWlO1AKQZm14K4GN8M= -github.com/rancher/norman v0.0.0-20180925184916-d675dc79491f/go.mod h1:RG+lnsTkAH0hpXtWUsBSmrgPrPccC2z6d7M1m/grjO0= -github.com/rancher/norman v0.0.0-20181010023203-ad4865987ce7 h1:sLQZP5VDqDjctHvTsxMsfrtACC+eooWOySuWgLd6q/8= -github.com/rancher/norman v0.0.0-20181010023203-ad4865987ce7/go.mod h1:hIlaLlQ+ZVmTY/Hv+JBmxKLhJzOpHbG2IhaWOkHaSBs= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2 h1:a07zp0wovcAE2jH+wlD22JLqUH6Rdl8Aon+NiyPxE+0= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680 h1:oAXco1Ts88F75L1qvG3BAa4ChXI3EZDfxbB+p+y8+gE= -github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20170825220121-81e90905daef h1:R8ubLIilYRXIXpgjOg2l/ECVs3HzVKIjJEhxSsQ91u4= -golang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879 h1:0rFa7EaCGdQPmZVbo9F7MNF65b8dyzS6EUnXjs9Cllk= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f 
h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20171031081856-95c657629925 h1:nCH33NboKIsT4HoXBsXTWX8ul303HxWgkc5s2Ezwacg= -golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317 h1:WKW+OPdYPlvOTVGHuMfjnIC6yY2SI93yFB0pZ7giBmQ= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170428054726-2382e3994d48 h1:Al/HKLBwsMBsWhxa71LOWO8MeCbD21L+x5rHb83JHjI= -golang.org/x/tools v0.0.0-20170428054726-2382e3994d48/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054 h1:ROF+R/wHHruzF40n5DfPv2jwm7rCJwvs8fz+RTZWjLE= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -k8s.io/api v0.0.0-20180621150657-6c0bbc3e58fa h1:FdiZyyrmQXY7AWCNUfAJrx9UCjMF/oBNZP8CmKoc2aU= -k8s.io/api v0.0.0-20180621150657-6c0bbc3e58fa/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20180621165922-80db67131e8d h1:QYxqxjF8LG0fEp4lhpDj4zGXaO1EAbt97/3vqVL0dpk= -k8s.io/apiextensions-apiserver v0.0.0-20180621165922-80db67131e8d/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20180619225948-e386b2658ed2 h1:NJEj7o7SKxpURej3uJ1QZJZCeRlRj21EatnCK65nrB4= -k8s.io/apimachinery v0.0.0-20180619225948-e386b2658ed2/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v2.0.0-alpha.0.0.20180621152933-b0722d92a7c1+incompatible h1:lph8g2o3QoQdw5W+fKHD/+Td4MEN2dmXgAjoOH5aISo= -k8s.io/client-go v2.0.0-alpha.0.0.20180621152933-b0722d92a7c1+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/gengo v0.0.0-20180223161844-01a732e01d00 h1:vt4Sh/+HFnLoTScgFLNoMjNqOg0sQgAzViarcz+UX3Q= -k8s.io/gengo v0.0.0-20180223161844-01a732e01d00/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/kube-openapi v0.0.0-20180509051136-39cb288412c4 h1:gW+EUB2I96nbxVenV/8ctfbACsHP+yxlT2dhMCsiy+s= -k8s.io/kube-openapi v0.0.0-20180509051136-39cb288412c4/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kubernetes v1.10.5/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 00000000..f5fad8ee --- 
/dev/null +++ b/vendor.conf @@ -0,0 +1,5 @@ +# package +github.com/rancher/types + +github.com/pkg/errors v0.8.0 +github.com/rancher/norman 4c3df5a3de57701e5ff57e13b45a3b219b5d9d33 transitive=true diff --git a/vendor/github.com/beorn7/perks/.gitignore b/vendor/github.com/beorn7/perks/.gitignore new file mode 100644 index 00000000..1bd9209a --- /dev/null +++ b/vendor/github.com/beorn7/perks/.gitignore @@ -0,0 +1,2 @@ +*.test +*.prof diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 00000000..fc057777 --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index d7d14f8e..587b1fc5 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -77,20 +77,15 @@ func NewHighBiased(epsilon float64) *Stream { // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. 
- targets := targetMapToSlice(targetMap) - +func NewTargeted(targets map[float64]float64) *Stream { ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + f = (2 * epsilon * (s.n - r)) / (1 - quantile) } if f < m { m = f @@ -101,25 +96,6 @@ func NewTargeted(targetMap map[float64]float64) *Stream { return newStream(ƒ) } -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. Take care when using across multiple goroutines. type Stream struct { @@ -157,7 +133,7 @@ func (s *Stream) Query(q float64) float64 { if l == 0 { return 0 } - i := int(math.Ceil(float64(l) * q)) + i := int(float64(l) * q) if i > 0 { i -= 1 } diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 00000000..984e0736 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 00000000..26243044 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. 
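To make the pretty printer concrete before the README's own quick-start snippets below, here is a minimal, self-contained sketch; the `Project` type and its values are illustrative assumptions, not part of the vendored file:

```Go
package main

import "github.com/davecgh/go-spew/spew"

// Project is a throwaway type chosen only to show how spew expands
// nested pointers and annotates every value with its type.
type Project struct {
	Name   string
	Labels map[string]string
	Parent *Project
}

func main() {
	p := &Project{
		Name:   "demo",
		Labels: map[string]string{"team": "core"},
		Parent: &Project{Name: "root"},
	}
	// fmt.Printf("%+v", p) would stop at the nested pointer;
	// spew.Dump follows it and prints types alongside values.
	spew.Dump(p)
}
```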
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
+(http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses):
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+	"fmt"
+	"html"
+	"net/http"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/html")
+	fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+	fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+  flag: (main.Flag) flagTwo,
+  data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+  (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+	String to use for each indentation level for Dump functions.
+	It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+	Maximum number of levels to descend into nested data structures.
+	There is no limit by default.
+
+* DisableMethods
+	Disables invocation of error and Stringer interface methods.
+	Method invocation is enabled by default.
+
+* DisablePointerMethods
+	Disables invocation of error and Stringer interface methods on types
+	which only accept pointer receivers from non-pointer variables. This option
+	relies on access to the unsafe package, so it will not have any effect when
+	running in environments without access to the unsafe package such as Google
+	App Engine or with the "safe" build tag specified.
+	Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+	DisablePointerAddresses specifies whether to disable the printing of
+	pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+	DisableCapacities specifies whether to disable the printing of capacities
+	for arrays, slices, maps and channels. This is useful when diffing data
+	structures in tests.
+
+* ContinueOnMethod
+	Enables recursion into types after invoking error and Stringer interface
+	methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+	Specifies map keys should be sorted before being printed. Use
+	this to have a more deterministic, diffable output. Note that
+	only native types (bool, int, uint, floats, uintptr and string)
+	and types which implement error or Stringer interfaces are supported,
+	with other types sorted according to the reflect.Value.String() output
+	which guarantees display stability. Natural map order is used by
+	default.
+
+* SpewKeys
+	SpewKeys specifies that, as a last resort attempt, map keys should be
+	spewed to strings and sorted by those strings. This is only considered
+	if SortKeys is true.
+
+```
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
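The options listed above are plain fields on spew's `ConfigState` type, so a locally scoped configuration is just a struct literal; a minimal sketch (the option values are chosen purely for illustration):

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A ConfigState carries its own settings and exposes the same methods
	// as the package-level functions (Dump, Sdump, Printf, and so on).
	cfg := spew.ConfigState{
		Indent:   "\t", // a tab per level instead of the default single space
		MaxDepth: 2,    // stop descending after two levels of nesting
		SortKeys: true, // deterministic map ordering, handy when diffing dumps
	}
	cfg.Dump(map[string]int{"b": 2, "a": 1})
}
```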
diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 00000000..9579497e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 00000000..2cd087a2 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/gogo/protobuf/.gitignore b/vendor/github.com/gogo/protobuf/.gitignore new file mode 100644 index 00000000..76009479 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/.gitignore @@ -0,0 +1,3 @@ +._* +*.js +*.js.map diff --git a/vendor/github.com/gogo/protobuf/.mailmap b/vendor/github.com/gogo/protobuf/.mailmap new file mode 100644 index 00000000..bc001021 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/.mailmap @@ -0,0 +1,8 @@ +Walter Schulze Walter Schulze +Walter Schulze +Walter Schulze awalterschulze +Walter Schulze awalterschulze@gmail.com +John Tuley +Anton Povarov +Denis Smirnov dennwc +DongYun Kang \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/.travis.yml b/vendor/github.com/gogo/protobuf/.travis.yml new file mode 100644 index 00000000..5ac9a939 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/.travis.yml @@ -0,0 +1,24 @@ +env: + - PROTOBUF_VERSION=2.6.1 + - PROTOBUF_VERSION=3.0.2 + - PROTOBUF_VERSION=3.2.0 + +before_install: + - ./install-protobuf.sh + - PATH=/home/travis/bin:$PATH protoc --version + +script: + - PATH=/home/travis/bin:$PATH make buildserverall + - echo $TRAVIS_GO_VERSION + - if [ "$TRAVIS_GO_VERSION" == 1.8 ] && [[ "$PROTOBUF_VERSION" == 3.2.0 ]]; then ! 
git status --porcelain | read || (git status; git diff; exit 1); fi + +language: go + +go: + - 1.6.3 + - 1.7.1 + - 1.8 + +matrix: + allow_failures: + - go: 1.6.3 diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS new file mode 100644 index 00000000..b368efb7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS @@ -0,0 +1,5 @@ +The contributors to the Go protobuf repository: + +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/Makefile b/vendor/github.com/gogo/protobuf/Makefile new file mode 100644 index 00000000..0dcb4ab7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/Makefile @@ -0,0 +1,154 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +GO_VERSION:=$(shell go version) + +.PHONY: nuke regenerate tests clean install gofmt vet contributors + +all: clean install regenerate install tests errcheck vet + +buildserverall: clean install regenerate install tests vet js + +install: + go install ./proto + go install ./gogoproto + go install ./jsonpb + go install ./protoc-gen-gogo + go install ./protoc-gen-gofast + go install ./protoc-gen-gogofast + go install ./protoc-gen-gogofaster + go install ./protoc-gen-gogoslick + go install ./protoc-gen-gostring + go install ./protoc-min-version + go install ./protoc-gen-combo + go install ./gogoreplace + +clean: + go clean ./... + +nuke: + go clean -i ./... + +gofmt: + gofmt -l -s -w . 
+ +regenerate: + make -C protoc-gen-gogo/descriptor regenerate + make -C protoc-gen-gogo/plugin regenerate + make -C protoc-gen-gogo/testdata regenerate + make -C gogoproto regenerate + make -C proto/testdata regenerate + make -C jsonpb/jsonpb_test_proto regenerate + make -C _conformance regenerate + make -C types regenerate + make -C test regenerate + make -C test/example regenerate + make -C test/unrecognized regenerate + make -C test/group regenerate + make -C test/unrecognizedgroup regenerate + make -C test/enumstringer regenerate + make -C test/unmarshalmerge regenerate + make -C test/moredefaults regenerate + make -C test/issue8 regenerate + make -C test/enumprefix regenerate + make -C test/enumcustomname regenerate + make -C test/packed regenerate + make -C test/protosize regenerate + make -C test/tags regenerate + make -C test/oneof regenerate + make -C test/oneof3 regenerate + make -C test/theproto3 regenerate + make -C test/mapsproto2 regenerate + make -C test/issue42order regenerate + make -C proto generate-test-pbs + make -C test/importdedup regenerate + make -C test/custombytesnonstruct regenerate + make -C test/required regenerate + make -C test/casttype regenerate + make -C test/castvalue regenerate + make -C vanity/test regenerate + make -C test/sizeunderscore regenerate + make -C test/issue34 regenerate + make -C test/empty-issue70 regenerate + make -C test/indeximport-issue72 regenerate + make -C test/fuzztests regenerate + make -C test/oneofembed regenerate + make -C test/asymetric-issue125 regenerate + make -C test/filedotname regenerate + make -C test/nopackage regenerate + make -C test/types regenerate + make -C test/proto3extension regenerate + make -C test/stdtypes regenerate + make -C test/data regenerate + make -C test/typedecl regenerate + make -C test/issue260 regenerate + make -C test/issue261 regenerate + make -C test/issue262 regenerate + make -C test/enumdecl regenerate + make -C test/typedecl_all regenerate + make -C test/enumdecl_all regenerate + make gofmt + +tests: + go build ./test/enumprefix + go test ./... + +vet: + go vet ./... + go tool vet --shadow . + +errcheck: + go get github.com/kisielk/errcheck + errcheck ./test/... + +drone: + sudo apt-get install protobuf-compiler + (cd $(GOPATH)/src/github.com/gogo/protobuf && make buildserverall) + +testall: + go get -u github.com/golang/protobuf/proto + make -C protoc-gen-gogo/testdata test + make -C vanity/test test + make -C test/registration test + make tests + +bench: + (cd test/mixbench && go build .) + (cd test/mixbench && ./mixbench) + +contributors: + git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS + +js: +ifeq (go1.8, $(findstring go1.8, $(GO_VERSION))) + go get github.com/gopherjs/gopherjs + gopherjs build github.com/gogo/protobuf/protoc-gen-gogo +endif + +update: + (cd protobuf && make update) diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README new file mode 100644 index 00000000..0ad51363 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/README @@ -0,0 +1,258 @@ +GoGoProtobuf http://github.com/gogo/protobuf extends +GoProtobuf http://github.com/golang/protobuf + +# Go support for Protocol Buffers + +Google's data interchange format. +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.4. + +This software implements Go bindings for protocol buffers. 
For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + +GoGoProtobuf provides extensions for protocol buffers and GoProtobuf +see http://github.com/gogo/protobuf/gogoproto/doc.go + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --gogo_out +parameter set to the directory you want to output the Go code to. + + protoc --gogo_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +If you are using any gogo.proto extensions you will need to specify the +proto_path to include the descriptor.proto and gogo.proto. +gogo.proto is located in github.com/gogo/protobuf/gogoproto +This should be fine, since your import is the same. +descriptor.proto is located in either github.com/gogo/protobuf/protobuf +or code.google.com/p/protobuf/trunk/src/ +Its import is google/protobuf/descriptor.proto so it might need some help. + + protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto + +========== + +The proto package converts data structures to and from the +wire format of protocol buffers. It works in concert with the +Go source code generated for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. 
+ - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and a Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +Consider file test.proto, containing + +```proto + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. 
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --gogo_out argument to protoc:
+
+	protoc --gogo_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+  light whose resolution requires breaking compatibility. We reserve the right
+  to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+  specification that are undefined. Programs that depend on such unspecified
+  behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+  inconsistency, incompleteness, or change in the Protocol Buffers
+  specification, resolving the issue could affect the meaning or legality of
+  existing programs. We reserve the right to address such issues, including
+  updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+  that depends on the buggy behavior may break if the bug is fixed. We reserve
+  the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+  names that already exist in a schema, causing applications to break. When the
+  code generator encounters a field in the schema that would collide with a
+  generated field or method name, the code generator will append an underscore
+  to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+  start with `XXX`. These parts of the generated code are exported out of
+  necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to
+ensure that compilation will fail if the available version of the proto library
+is too old. Whenever we make a change to the generated code that requires newer
+library support, in the same commit we will increment the version number of the
+generated code and declare a new package-level constant whose name incorporates
+the latest version number. Removing a compatibility constant is considered a
+breaking change and would be subject to the announcement policy stated above.
+(A short illustration of this mechanism follows the Plugins section below.)
+
+## Plugins ##
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
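+
+As a minimal sketch of the version-pinning mechanism described above (the
+package name is invented; the constant is real), every generated .pb.go file
+contains a compile-time assertion along these lines:
+
+```go
+package example
+
+import "github.com/gogo/protobuf/proto"
+
+// This fails to compile if the proto runtime available at build time is
+// older than the code generator that produced this file.
+const _ = proto.ProtoPackageIsVersion2
+```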
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
new file mode 100644
index 00000000..e97bb1ba
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/Readme.md
@@ -0,0 +1,117 @@
+# Protocol Buffers for Go with Gadgets
+
+[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf)
+
+gogoprotobuf is a fork of golang/protobuf with extra code generation features.
+
+This code generation is used to achieve:
+
+  - fast marshalling and unmarshalling
+  - more canonical Go structures
+  - goprotobuf compatibility
+  - less typing by optionally generating extra helper code
+  - peace of mind by optionally generating test and benchmark code
+  - other serialization formats
+
+Keeping track of how up to date gogoprotobuf is relative to golang/protobuf
+is done in this issue.
+
+## Users
+
+These projects use gogoprotobuf:
+
+  - etcd - blog - sample proto file
+  - spacemonkey - blog
+  - badoo - sample proto file
+  - mesos-go - sample proto file
+  - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com
+  - cockroachdb - sample proto file
+  - go-ipfs - sample proto file
+  - rkive-go - sample proto file
+  - dropbox
+  - srclib - sample proto file
+  - adyoulike
+  - cloudfoundry - sample proto file
+  - kubernetes - go2idl built on top of gogoprotobuf
+  - dgraph - release notes - benchmarks
+  - centrifugo - release notes - blog
+  - docker swarmkit - sample proto file
+  - nats.io - go-nats-streaming
+  - tidb - Communication between tidb and tikv
+  - protoactor-go - vanity command that also generates actors from service definitions
+
+Please let us know if you are using gogoprotobuf by posting on our GoogleGroup.
+
+### Mentioned
+
+  - Cloudflare - go serialization talk - Albert Strasheim
+  - gophercon
+  - alecthomas' go serialization benchmarks
+
+## Getting Started
+
+There are several ways to use gogoprotobuf, but for all of them you need to
+install Go and protoc. After that you can choose:
+
+  - Speed
+  - More Speed and more generated code
+  - Most Speed and most customization
+
+### Installation
+
+To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 are continuously tested.
+
+Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
+Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.2.0 are continuously tested.
+
+### Speed
+
+Install the protoc-gen-gofast binary
+
+	go get github.com/gogo/protobuf/protoc-gen-gofast
+
+Use it to generate faster marshaling and unmarshaling Go code for your protocol buffers.
+
+	protoc --gofast_out=. myproto.proto
+
+This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### More Speed and more generated code
+
+Fields without pointers mean less work for the garbage collector.
+More code generation results in more convenient methods.
+
+Other binaries are also included:
+
+	protoc-gen-gogofast (same as gofast, but imports gogoprotobuf)
+	protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields)
+	protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods)
+
+Installing any of these binaries is easy.
Simply run:
+
+	go get github.com/gogo/protobuf/proto
+	go get github.com/gogo/protobuf/{binary}
+	go get github.com/gogo/protobuf/gogoproto
+
+These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### Most Speed and most customization
+
+Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you work with and the structs you serialize.
+gogoprotobuf also offers more serialization formats and generation of tests and even more methods.
+
+Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
+
+Install protoc-gen-gogo:
+
+	go get github.com/gogo/protobuf/proto
+	go get github.com/gogo/protobuf/jsonpb
+	go get github.com/gogo/protobuf/protoc-gen-gogo
+	go get github.com/gogo/protobuf/gogoproto
+
+## GRPC
+
+It works the same as golang/protobuf; simply specify the plugin.
+Here is an example using gofast:
+
+	protoc --gofast_out=plugins=grpc:. my.proto
diff --git a/vendor/github.com/gogo/protobuf/bench.md b/vendor/github.com/gogo/protobuf/bench.md
new file mode 100644
index 00000000..16da66ad
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/bench.md
@@ -0,0 +1,190 @@
+# Benchmarks
+
+## How to reproduce
+
+For a comparison run:
+
+	make bench
+
+followed by [benchcmp](http://code.google.com/p/go/source/browse/misc/benchcmp) on the resulting files:
+
+	$GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshaler.txt
+	$GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshaler.txt
+
+Benchmarks ran on Revision: 11c56be39364
+
+June 2013
+
+Processor 2.66 GHz Intel Core i7
+
+Memory 8 GB 1067 MHz DDR3
+
+## Marshaler
+
+| benchmark | old ns/op | new ns/op | delta |
+| --- | --- | --- | --- |
+| BenchmarkNidOptNativeProtoMarshal | 2656 | 889 | -66.53% |
+| BenchmarkNinOptNativeProtoMarshal | 2651 | 1015 | -61.71% |
+| BenchmarkNidRepNativeProtoMarshal | 42661 | 12519 | -70.65% |
+| BenchmarkNinRepNativeProtoMarshal | 42306 | 12354 | -70.80% |
+| BenchmarkNidRepPackedNativeProtoMarshal | 34148 | 11902 | -65.15% |
+| BenchmarkNinRepPackedNativeProtoMarshal | 33375 | 11969 | -64.14% |
+| BenchmarkNidOptStructProtoMarshal | 7148 | 3727 | -47.86% |
+| BenchmarkNinOptStructProtoMarshal | 6956 | 3481 | -49.96% |
+| BenchmarkNidRepStructProtoMarshal | 46551 | 19492 | -58.13% |
+| BenchmarkNinRepStructProtoMarshal | 46715 | 19043 | -59.24% |
+| BenchmarkNidEmbeddedStructProtoMarshal | 5231 | 2050 | -60.81% |
+| BenchmarkNinEmbeddedStructProtoMarshal | 4665 | 2000 | -57.13% |
+| BenchmarkNidNestedStructProtoMarshal | 181106 | 103604 | -42.79% |
+| BenchmarkNinNestedStructProtoMarshal | 182053 | 102069 | -43.93% |
+| BenchmarkNidOptCustomProtoMarshal | 1209 | 310 | -74.36% |
+| BenchmarkNinOptCustomProtoMarshal | 1435 | 277 | -80.70% |
+| BenchmarkNidRepCustomProtoMarshal | 4126 | 763 | -81.51% |
+| BenchmarkNinRepCustomProtoMarshal | 3972 | 769 | -80.64% |
+| BenchmarkNinOptNativeUnionProtoMarshal | 973 | 303 | -68.86% |
+| BenchmarkNinOptStructUnionProtoMarshal | 1536 | 521 | -66.08% |
+| BenchmarkNinEmbeddedStructUnionProtoMarshal | 2327 | 884 | -62.01% |
+| BenchmarkNinNestedStructUnionProtoMarshal | 2070 | 743 | -64.11% |
+| BenchmarkTreeProtoMarshal | 1554 | 838 | -46.07% |
+| BenchmarkOrBranchProtoMarshal | 3156 | 2012 | -36.25% |
+| BenchmarkAndBranchProtoMarshal | 3183 | 1996 | -37.29% |
+| BenchmarkLeafProtoMarshal | 965 | 606 | -37.20% |
+| BenchmarkDeepTreeProtoMarshal | 2316 | 1283 | -44.60% |
+| BenchmarkADeepBranchProtoMarshal | 2719 | 1492 | -45.13% |
+| BenchmarkAndDeepBranchProtoMarshal | 4663 | 2922 | -37.34% |
+| BenchmarkDeepLeafProtoMarshal | 1849 | 1016 | -45.05% |
+| BenchmarkNilProtoMarshal | 439 | 76 | -82.53% |
+| BenchmarkNidOptEnumProtoMarshal | 514 | 152 | -70.43% |
+| BenchmarkNinOptEnumProtoMarshal | 550 | 158 | -71.27% |
+| BenchmarkNidRepEnumProtoMarshal | 647 | 207 | -68.01% |
+| BenchmarkNinRepEnumProtoMarshal | 662 | 213 | -67.82% |
+| BenchmarkTimerProtoMarshal | 934 | 271 | -70.99% |
+| BenchmarkMyExtendableProtoMarshal | 608 | 185 | -69.57% |
+| BenchmarkOtherExtenableProtoMarshal | 1112 | 332 | -70.14% |
+| benchmark | old MB/s | new MB/s | speedup |
+| --- | --- | --- | --- |
+| BenchmarkNidOptNativeProtoMarshal | 126.86 | 378.86 | 2.99x |
+| BenchmarkNinOptNativeProtoMarshal | 114.27 | 298.42 | 2.61x |
+| BenchmarkNidRepNativeProtoMarshal | 164.25 | 561.20 | 3.42x |
+| BenchmarkNinRepNativeProtoMarshal | 166.10 | 568.23 | 3.42x |
+| BenchmarkNidRepPackedNativeProtoMarshal | 99.10 | 283.97 | 2.87x |
+| BenchmarkNinRepPackedNativeProtoMarshal | 101.30 | 282.31 | 2.79x |
+| BenchmarkNidOptStructProtoMarshal | 176.83 | 339.07 | 1.92x |
+| BenchmarkNinOptStructProtoMarshal | 163.59 | 326.57 | 2.00x |
+| BenchmarkNidRepStructProtoMarshal | 178.84 | 427.49 | 2.39x |
+| BenchmarkNinRepStructProtoMarshal | 178.70 | 437.69 | 2.45x |
+| BenchmarkNidEmbeddedStructProtoMarshal | 124.24 | 317.56 | 2.56x |
+| BenchmarkNinEmbeddedStructProtoMarshal | 132.03 | 307.99 | 2.33x |
+| BenchmarkNidNestedStructProtoMarshal | 192.91 | 337.86 | 1.75x |
+| BenchmarkNinNestedStructProtoMarshal | 192.44 | 344.45 | 1.79x |
+| BenchmarkNidOptCustomProtoMarshal | 29.77 | 116.03 | 3.90x |
+| BenchmarkNinOptCustomProtoMarshal | 22.29 | 115.38 | 5.18x |
+| BenchmarkNidRepCustomProtoMarshal | 35.14 | 189.80 | 5.40x |
+| BenchmarkNinRepCustomProtoMarshal | 36.50 | 188.40 | 5.16x |
+| BenchmarkNinOptNativeUnionProtoMarshal | 32.87 | 105.39 | 3.21x |
+| BenchmarkNinOptStructUnionProtoMarshal | 66.40 | 195.76 | 2.95x |
+| BenchmarkNinEmbeddedStructUnionProtoMarshal | 93.24 | 245.26 | 2.63x |
+| BenchmarkNinNestedStructUnionProtoMarshal | 57.49 | 160.06 | 2.78x |
+| BenchmarkTreeProtoMarshal | 137.64 | 255.12 | 1.85x |
+| BenchmarkOrBranchProtoMarshal | 137.80 | 216.10 | 1.57x |
+| BenchmarkAndBranchProtoMarshal | 136.64 | 217.89 | 1.59x |
+| BenchmarkLeafProtoMarshal | 214.48 | 341.53 | 1.59x |
+| BenchmarkDeepTreeProtoMarshal | 95.85 | 173.03 | 1.81x |
+| BenchmarkADeepBranchProtoMarshal | 82.73 | 150.78 | 1.82x |
+| BenchmarkAndDeepBranchProtoMarshal | 96.72 | 153.98 | 1.59x |
+| BenchmarkDeepLeafProtoMarshal | 117.34 | 213.41 | 1.82x |
+| BenchmarkNidOptEnumProtoMarshal | 3.89 | 13.16 | 3.38x |
+| BenchmarkNinOptEnumProtoMarshal | 1.82 | 6.30 | 3.46x |
+| BenchmarkNidRepEnumProtoMarshal | 12.36 | 38.50 | 3.11x |
+| BenchmarkNinRepEnumProtoMarshal | 12.08 | 37.53 | 3.11x |
+| BenchmarkTimerProtoMarshal | 73.81 | 253.87 | 3.44x |
+| BenchmarkMyExtendableProtoMarshal | 13.15 | 43.08 | 3.28x |
+| BenchmarkOtherExtenableProtoMarshal | 24.28 | 81.09 | 3.34x |
+
+## Unmarshaler
+
+| benchmark | old ns/op | new ns/op | delta |
+| --- | --- | --- | --- |
+| BenchmarkNidOptNativeProtoUnmarshal | 2521 | 1006 | -60.10% |
+| BenchmarkNinOptNativeProtoUnmarshal | 2529 | 1750 | -30.80% |
+| BenchmarkNidRepNativeProtoUnmarshal | 49067 | 35299 | -28.06% |
+| BenchmarkNinRepNativeProtoUnmarshal | 47990 | 35456 | -26.12% |
+| BenchmarkNidRepPackedNativeProtoUnmarshal | 26456 | 23950 | -9.47% |
+| BenchmarkNinRepPackedNativeProtoUnmarshal | 26499 | 24037 | -9.29% |
+| BenchmarkNidOptStructProtoUnmarshal | 6803 | 3873 | -43.07% |
+| BenchmarkNinOptStructProtoUnmarshal | 6786 | 4154 | -38.79% |
+| BenchmarkNidRepStructProtoUnmarshal | 56276 | 31970 | -43.19% |
+| BenchmarkNinRepStructProtoUnmarshal | 48750 | 31832 | -34.70% |
+| BenchmarkNidEmbeddedStructProtoUnmarshal | 4556 | 1973 | -56.69% |
+| BenchmarkNinEmbeddedStructProtoUnmarshal | 4485 | 1975 | -55.96% |
+| BenchmarkNidNestedStructProtoUnmarshal | 223395 | 135844 | -39.19% |
+| BenchmarkNinNestedStructProtoUnmarshal | 226446 | 134022 | -40.82% |
+| BenchmarkNidOptCustomProtoUnmarshal | 1859 | 300 | -83.86% |
+| BenchmarkNinOptCustomProtoUnmarshal | 1486 | 402 | -72.95% |
+| BenchmarkNidRepCustomProtoUnmarshal | 8229 | 1669 | -79.72% |
+| BenchmarkNinRepCustomProtoUnmarshal | 8253 | 1649 | -80.02% |
+| BenchmarkNinOptNativeUnionProtoUnmarshal | 840 | 307 | -63.45% |
+| BenchmarkNinOptStructUnionProtoUnmarshal | 1395 | 639 | -54.19% |
+| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 2297 | 1167 | -49.19% |
+| BenchmarkNinNestedStructUnionProtoUnmarshal | 1820 | 889 | -51.15% |
+| BenchmarkTreeProtoUnmarshal | 1521 | 720 | -52.66% |
+| BenchmarkOrBranchProtoUnmarshal | 2669 | 1385 | -48.11% |
+| BenchmarkAndBranchProtoUnmarshal | 2667 | 1420 | -46.76% |
+| BenchmarkLeafProtoUnmarshal | 1171 | 584 | -50.13% |
+| BenchmarkDeepTreeProtoUnmarshal | 2065 | 1081 | -47.65% |
+| BenchmarkADeepBranchProtoUnmarshal | 2695 | 1178 | -56.29% |
+| BenchmarkAndDeepBranchProtoUnmarshal | 4055 | 1918 | -52.70% |
+| BenchmarkDeepLeafProtoUnmarshal | 1758 | 865 | -50.80% |
+| BenchmarkNilProtoUnmarshal | 564 | 63 | -88.79% |
+| BenchmarkNidOptEnumProtoUnmarshal | 762 | 73 | -90.34% |
+| BenchmarkNinOptEnumProtoUnmarshal | 764 | 163 | -78.66% |
+| BenchmarkNidRepEnumProtoUnmarshal | 1078 | 447 | -58.53% |
+| BenchmarkNinRepEnumProtoUnmarshal | 1071 | 479 | -55.28% |
+| BenchmarkTimerProtoUnmarshal | 1128 | 362 | -67.91% |
+| BenchmarkMyExtendableProtoUnmarshal | 808 | 217 | -73.14% |
+| BenchmarkOtherExtenableProtoUnmarshal | 1233 | 517 | -58.07% |
+| benchmark | old MB/s | new MB/s | speedup |
+| --- | --- | --- | --- |
+| BenchmarkNidOptNativeProtoUnmarshal | 133.67 | 334.98 | 2.51x |
+| BenchmarkNinOptNativeProtoUnmarshal | 119.77 | 173.08 | 1.45x |
+| BenchmarkNidRepNativeProtoUnmarshal | 143.23 | 199.12 | 1.39x |
+| BenchmarkNinRepNativeProtoUnmarshal | 146.07 | 198.16 | 1.36x |
+| BenchmarkNidRepPackedNativeProtoUnmarshal | 127.80 | 141.04 | 1.10x |
+| BenchmarkNinRepPackedNativeProtoUnmarshal | 127.55 | 140.78 | 1.10x |
+| BenchmarkNidOptStructProtoUnmarshal | 185.79 | 326.31 | 1.76x |
+| BenchmarkNinOptStructProtoUnmarshal | 167.68 | 273.66 | 1.63x |
+| BenchmarkNidRepStructProtoUnmarshal | 147.88 | 260.39 | 1.76x |
+| BenchmarkNinRepStructProtoUnmarshal | 171.20 | 261.97 | 1.53x |
+| BenchmarkNidEmbeddedStructProtoUnmarshal | 142.86 | 329.42 | 2.31x |
+| BenchmarkNinEmbeddedStructProtoUnmarshal | 137.33 | 311.83 | 2.27x |
+| BenchmarkNidNestedStructProtoUnmarshal | 154.97 | 259.47 | 1.67x |
+| BenchmarkNinNestedStructProtoUnmarshal | 154.32 | 258.42 | 1.67x |
+| BenchmarkNidOptCustomProtoUnmarshal | 19.36 | 119.66 | 6.18x |
+| BenchmarkNinOptCustomProtoUnmarshal | 21.52 | 79.50 | 3.69x |
+| BenchmarkNidRepCustomProtoUnmarshal | 17.62 | 86.86 | 4.93x |
+| BenchmarkNinRepCustomProtoUnmarshal | 17.57 | 87.92 | 5.00x |
+| BenchmarkNinOptNativeUnionProtoUnmarshal | 38.07 | 104.12 | 2.73x |
+| BenchmarkNinOptStructUnionProtoUnmarshal | 73.08 | 159.54 | 2.18x |
+| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 94.00 | 185.92 | 1.98x |
+| BenchmarkNinNestedStructUnionProtoUnmarshal | 65.35 | 133.75 | 2.05x |
+| BenchmarkTreeProtoUnmarshal | 141.28 | 297.13 | 2.10x |
+| BenchmarkOrBranchProtoUnmarshal | 162.56 | 313.96 | 1.93x |
+| BenchmarkAndBranchProtoUnmarshal | 163.06 | 306.15 | 1.88x |
+| BenchmarkLeafProtoUnmarshal | 176.72 | 354.19 | 2.00x |
+| BenchmarkDeepTreeProtoUnmarshal | 107.50 | 205.30 | 1.91x |
+| BenchmarkADeepBranchProtoUnmarshal | 83.48 | 190.88 | 2.29x |
+| BenchmarkAndDeepBranchProtoUnmarshal | 110.97 | 234.60 | 2.11x |
+| BenchmarkDeepLeafProtoUnmarshal | 123.40 | 250.73 | 2.03x |
+| BenchmarkNidOptEnumProtoUnmarshal | 2.62 | 27.16 | 10.37x |
+| BenchmarkNinOptEnumProtoUnmarshal | 1.31 | 6.11 | 4.66x |
+| BenchmarkNidRepEnumProtoUnmarshal | 7.42 | 17.88 | 2.41x |
+| BenchmarkNinRepEnumProtoUnmarshal | 7.47 | 16.69 | 2.23x |
+| BenchmarkTimerProtoUnmarshal | 61.12 | 190.34 | 3.11x |
+| BenchmarkMyExtendableProtoUnmarshal | 9.90 | 36.71 | 3.71x |
+| BenchmarkOtherExtenableProtoUnmarshal | 21.90 | 52.13 | 2.38x |
\ No newline at end of file
diff --git a/vendor/github.com/gogo/protobuf/custom_types.md b/vendor/github.com/gogo/protobuf/custom_types.md
new file mode 100644
index 00000000..3eed249b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/custom_types.md
@@ -0,0 +1,68 @@
+# Custom types
+
+Custom types is a gogo protobuf extension that allows you to use a custom
+struct type to decorate the underlying structure of the protocol message.
+
+# How to use
+
+## Defining the protobuf message
+
+```proto
+message CustomType {
+  optional ProtoType Field = 1 [(gogoproto.customtype) = "T"];
+}
+
+message ProtoType {
+  optional string Field = 1;
+}
+```
+
+or alternatively you can declare the field type in the protocol message to be
+`bytes`:
+
+```proto
+message BytesCustomType {
+  optional bytes Field = 1 [(gogoproto.customtype) = "T"];
+}
+```
+
+The downside of using `bytes` is that it makes it harder to generate protobuf
+code in other languages. In either case, it is the user's responsibility to
+ensure that the custom type marshals and unmarshals to the expected wire
+format. That is, in the first example, gogo protobuf will not attempt to ensure
+that the wire formats of `ProtoType` and `T` are compatible.
+
+## Custom type method signatures
+
+The custom type must define the following methods with the given
+signatures. Assuming the custom type is called `T`:
+
+```go
+func (t T) Marshal() ([]byte, error) {}
+func (t *T) MarshalTo(data []byte) (n int, err error) {}
+func (t *T) Unmarshal(data []byte) error {}
+
+func (t T) MarshalJSON() ([]byte, error) {}
+func (t *T) UnmarshalJSON(data []byte) error {}
+
+// only required if the compare option is set
+func (t T) Compare(other T) int {}
+// only required if the equal option is set
+func (t T) Equal(other T) bool {}
+// only required if the populate option is set
+func NewPopulatedT(r randyThetest) *T {}
+```
+
+Check [t.go](test/t.go) for a full example.
+
+# Warnings and issues
+
+`Warning about customtype: It is your responsibility to test all cases of your marshaling, unmarshaling and size methods implemented for your custom type.`
+
+Issues with customtype include:
+  * A Bytes method is not allowed.
+  * Defining a customtype as a fake proto message is broken.
+  * proto.Clone is broken.
+  * Using a proto message as a customtype is not allowed.
+  * customtype of type map cannot UnmarshalText.
+  * customtype of type struct cannot jsonpb unmarshal.
diff --git a/vendor/github.com/gogo/protobuf/extensions.md b/vendor/github.com/gogo/protobuf/extensions.md
new file mode 100644
index 00000000..891359ac
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/extensions.md
@@ -0,0 +1,161 @@
+# gogoprotobuf Extensions
+
+Here is an [example.proto](https://github.com/gogo/protobuf/blob/master/test/example/example.proto) which uses most of the gogoprotobuf code generation plugins.
+
+Please also look at the example [Makefile](https://github.com/gogo/protobuf/blob/master/test/example/Makefile) which shows how to specify the `descriptor.proto` and `gogo.proto` in your proto_path.
+
+The documentation at [http://godoc.org/github.com/gogo/protobuf/gogoproto](http://godoc.org/github.com/gogo/protobuf/gogoproto) describes the extensions made to goprotobuf in more detail.
+
+Also see [http://godoc.org/github.com/gogo/protobuf/plugin/](http://godoc.org/github.com/gogo/protobuf/plugin/) for documentation of each of the extensions which have their own plugins.
+
+# Fast Marshalling and Unmarshalling
+
+Generating a `Marshal`, `MarshalTo`, `Size` (or `ProtoSize`) and `Unmarshal`
+method for a struct results in faster marshalling and unmarshalling than when
+using reflect.
+
+See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md)
+for a comparison between reflect and generated code used for marshalling and
+unmarshalling. A short example proto follows the table below.
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message | false |
+| sizer | Message | bool | if true, a Size method is generated for the specific message | false |
+| unmarshaler | Message | bool | if true, an Unmarshal method is generated for the specific message | false |
+| protosizer | Message | bool | if true, a ProtoSize method is generated for the specific message | false |
+| unsafe_marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. | false |
+| unsafe_unmarshaler | Message | bool | if true, an Unmarshal method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. | false |
+| stable_marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed | false |
+| typedecl (beta) | Message | bool | if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
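+
+As a sketch (the file and message names are invented; the option names come
+from the table above), these options are set per message in a .proto file:
+
+```proto
+syntax = "proto2";
+package example;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message FastMessage {
+  option (gogoproto.marshaler) = true;
+  option (gogoproto.sizer) = true;
+  option (gogoproto.unmarshaler) = true;
+
+  optional string name = 1;
+}
+```
+
+Generating with `protoc --gogo_out=. example.proto` then emits Marshal,
+MarshalTo, Size and Unmarshal methods for FastMessage instead of relying on
+reflect.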
+
+# More Canonical Go Structures
+
+Working with a goprotobuf struct often leads you to create a second struct
+that is easier to work with, plus a function that copies the values between
+the two structs.
+
+You might also find that basic structs that started their life as part of an
+API need to be sent over the wire. With gob, you could just send them. With
+goprotobuf, you need to make a new struct.
+
+`gogoprotobuf` tries to fix these problems with the nullable, embed,
+customtype, customname, casttype, castkey and castvalue field extensions
+(see the sketch after the table below).
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| nullable | Field | bool | if false, a field is generated without a pointer (see warning below). | true |
+| embed | Field | bool | if true, the field is generated as an embedded field. | false |
+| customtype | Field | string | It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128. For more information please refer to the CustomTypes document | goprotobuf type |
+| customname (beta) | Field | string | Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. | goprotobuf field name |
+| casttype (beta) | Field | string | Changes the generated field type. It assumes that this type is castable to the original goprotobuf field type. It currently does not support maps, structs or enums. | goprotobuf field type |
+| castkey (beta) | Field | string | Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
+| castvalue (beta) | Field | string | Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
+| enum_customname (beta) | Enum | string | Sets the type name of an enum. If goproto_enum_prefix is enabled, this value will be used as a prefix when generating enum values. | goprotobuf enum type name. Helps with golint issues. |
+| enumdecl (beta) | Enum | bool | if false, type declaration of the enum is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
+| enumvalue_customname (beta) | Enum Value | string | Changes the generated enum name. Helps with golint issues. | goprotobuf enum value name |
+| stdtime | Timestamp Field | bool | Changes the Well Known Timestamp Type to time.Time | Timestamp |
+| stdduration | Duration Field | bool | Changes the Well Known Duration Type to time.Duration | Duration |
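+
+A minimal sketch of the field extensions above (the message and field names
+are invented):
+
+```proto
+syntax = "proto2";
+package example;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message Inner {
+  optional string id = 1;
+}
+
+message Outer {
+  // Generated as Inner (a value), not *Inner, so it can never be nil.
+  optional Inner inner = 1 [(gogoproto.nullable) = false];
+  // Generated with the Go field name ID instead of Id, which keeps golint happy.
+  optional string id = 2 [(gogoproto.customname) = "ID"];
+}
+```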
+
+`Warning about nullable: according to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set.`
+
+# Goprotobuf Compatibility
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers (see the section on tests below).
+
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+
+The enumprefix, getters and stringer extensions can be used to remove some of
+the unnecessary code generated by goprotobuf (an example follows the table
+below).
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| gogoproto_import | File | bool | if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. | true |
+| goproto_enum_prefix | Enum | bool | if false, generates the enum constant names without the messagetype prefix | true |
+| goproto_getters | Message | bool | if false, the message is generated without get methods, this is useful when you would rather want to use face | true |
+| goproto_stringer | Message | bool | if false, the message is generated without the default string method, this is useful for rather using stringer | true |
+| goproto_enum_stringer (experimental) | Enum | bool | if false, the enum is generated without the default string method, this is useful for rather using enum_stringer | true |
+| goproto_extensions_map (beta) | Message | bool | if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension | true |
+| goproto_unrecognized (beta) | Message | bool | if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. | true |
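+
+For example (the message name is invented), trimming the goprotobuf-style
+helpers for a single message might look like:
+
+```proto
+syntax = "proto2";
+package example;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message Plain {
+  option (gogoproto.goproto_getters) = false;  // no GetValue getter
+  option (gogoproto.goproto_stringer) = false; // no default String method
+
+  optional string value = 1;
+}
+```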
+
+# Less Typing
+
+The Protocol Buffer language is very parseable, and extra code can easily be
+generated for its structures.
+
+Helper methods, functions and interfaces can be generated by triggering
+certain extensions like gostring (a sketch follows the table below).
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| gostring | Message | bool | if true, a `GoString` method is generated. This returns a string representing valid go code to reproduce the current state of the struct. | false |
+| onlyone (deprecated) | Message | bool | if true, all fields must be nullable and only one of the fields may be set, like a union. Two methods are generated: `GetValue() interface{}` and `SetValue(v interface{}) (set bool)`. These provide easier interaction with a union. | false |
+| equal | Message | bool | if true, an Equal method is generated | false |
+| compare | Message | bool | if true, a Compare method is generated. This is very useful for quickly implementing sort on a list of protobuf structs | false |
+| verbose_equal | Message | bool | if true, a verbose equal method is generated for the message. This returns an error which describes the exact element which is not equal to the exact element in the other struct. | false |
+| stringer | Message | bool | if true, a String method is generated for the message. | false |
+| face | Message | bool | if true, a function will be generated which can convert a structure which satisfies an interface (face) to the specified structure. This interface contains getters for each of the fields in the struct. The specified struct is also generated with the getters. This allows it to satisfy its own face. | false |
+| description (beta) | Message | bool | if true, a Description method is generated for the message. | false |
+| populate | Message | bool | if true, a `NewPopulated` function is generated. This is necessary for generated tests. | false |
+| enum_stringer (experimental) | Enum | bool | if true, a String method is generated for an Enum | false |
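+
+A sketch of triggering two of these plugins (the message is invented):
+
+```proto
+syntax = "proto2";
+package example;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message Pair {
+  option (gogoproto.equal) = true;
+  option (gogoproto.compare) = true;
+
+  optional int64 key = 1;
+  optional int64 value = 2;
+}
+```
+
+The generated `a.Equal(b)` and `a.Compare(b)` methods (the latter returning
+-1, 0 or 1) then make it easy to sort a `[]*Pair` without hand-written
+comparison code.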
+
+Issues with Compare include:
+  * Oneof is not supported yet
+  * Not all Well Known Types are supported yet
+  * Maps are not supported
+
+# Peace of Mind
+
+Test and Benchmark generation is done with the following extensions:
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| testgen | Message | bool | if true, tests are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
+| benchgen | Message | bool | if true, benchmarks are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
+
+# More Serialization Formats
+
+Other serialization formats like xml and json typically use reflect to marshal
+and unmarshal structured data. Marshalling these structs to anything other
+than the default Go representation requires editing the struct tags. The
+following extensions provide ways of editing those tags for the generated
+protobuf structs (a field-level sketch follows the table below).
+
+| Name | Option | Type | Description | Default |
+| --- | --- | --- | --- | --- |
+| jsontag (beta) | Field | string | if set, the json tag value between the double quotes is replaced with this string | fieldname |
+| moretags (beta) | Field | string | if set, this string is appended to the tag string | empty |
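+
+A field-level sketch (the message and field names are invented, and the
+resulting tag is shown only roughly):
+
+```proto
+syntax = "proto2";
+package example;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message Tagged {
+  // The generated struct tag becomes roughly:
+  //   `protobuf:"bytes,1,opt,name=my_field" json:"myField" yaml:"my_field"`
+  optional string my_field = 1 [(gogoproto.jsontag) = "myField",
+                                (gogoproto.moretags) = "yaml:\"my_field\""];
+}
+```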
+
+Here is a longer explanation of jsontag and moretags.
+
+# File Options
+
+Each of the boolean message and enum extensions also has a file extension:
+
+  * `marshaler_all`
+  * `sizer_all`
+  * `protosizer_all`
+  * `unmarshaler_all`
+  * `unsafe_marshaler_all`
+  * `unsafe_unmarshaler_all`
+  * `stable_marshaler_all`
+  * `goproto_enum_prefix_all`
+  * `goproto_getters_all`
+  * `goproto_stringer_all`
+  * `goproto_enum_stringer_all`
+  * `goproto_extensions_map_all`
+  * `goproto_unrecognized_all`
+  * `gostring_all`
+  * `onlyone_all`
+  * `equal_all`
+  * `compare_all`
+  * `verbose_equal_all`
+  * `stringer_all`
+  * `enum_stringer_all`
+  * `face_all`
+  * `description_all`
+  * `populate_all`
+  * `testgen_all`
+  * `benchgen_all`
+  * `enumdecl_all`
+  * `typedecl_all`
+
+Each of these is the same as its Message Option counterpart, except it applies
+to all messages in the file. The Message option counterparts can also be used
+to overwrite their effect per message.
+
+# Tests
+
+  * The normal barrage of tests is run with: `make tests`
+  * A few weird tests: `make testall`
+  * Tests for compatibility with [golang/protobuf](https://github.com/golang/protobuf) are handled by a different project [harmonytests](https://github.com/gogo/harmonytests), since it requires goprotobuf.
+  * Cross version tests are made with [Travis CI](https://travis-ci.org/gogo/protobuf).
+  * GRPC Tests are also handled by a different project [grpctests](https://github.com/gogo/grpctests), since it depends on a lot of grpc libraries.
+  * Thanks to [go-fuzz](https://github.com/dvyukov/go-fuzz/) we have proper [fuzztests](https://github.com/gogo/fuzztests).
+
diff --git a/vendor/github.com/gogo/protobuf/install-protobuf.sh b/vendor/github.com/gogo/protobuf/install-protobuf.sh
new file mode 100755
index 00000000..10c9320e
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/install-protobuf.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+set -ex
+
+die() {
+    echo "$@" >&2
+    exit 1
+}
+
+case "$PROTOBUF_VERSION" in
+2*)
+    basename=protobuf-$PROTOBUF_VERSION
+    ;;
+3*)
+    basename=protobuf-cpp-$PROTOBUF_VERSION
+    ;;
+*)
+    die "unknown protobuf version: $PROTOBUF_VERSION"
+    ;;
+esac
+
+cd /home/travis
+
+wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz
+tar xzf $basename.tar.gz
+
+cd protobuf-$PROTOBUF_VERSION
+
+./configure --prefix=/home/travis && make -j2 && make install
diff --git a/vendor/github.com/golang/groupcache/.gitignore b/vendor/github.com/golang/groupcache/.gitignore
new file mode 100644
index 00000000..b25c15b8
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md
new file mode 100644
index 00000000..70c29da1
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/README.md
@@ -0,0 +1,73 @@
+# groupcache
+
+## Summary
+
+groupcache is a caching and cache-filling library, intended as a
+replacement for memcached in many cases.
+
+For API docs and examples, see http://godoc.org/github.com/golang/groupcache
+
+## Comparison to memcached
+
+### **Like memcached**, groupcache:
+
+ * shards by key to select which peer is responsible for that key
+
+### **Unlike memcached**, groupcache:
+
+ * does not require running a separate set of servers, thus massively
+   reducing deployment/configuration pain. groupcache is a client
+   library as well as a server. It connects to its own peers.
+
+ * comes with a cache filling mechanism.
Whereas memcached just says + "Sorry, cache miss", often resulting in a thundering herd of + database (or whatever) loads from an unbounded number of clients + (which has resulted in several fun outages), groupcache coordinates + cache fills such that only one load in one process of an entire + replicated set of processes populates the cache, then multiplexes + the loaded value to all callers. + + * does not support versioned values. If key "foo" is value "bar", + key "foo" must always be "bar". There are neither cache expiration + times, nor explicit cache evictions. Thus there is also no CAS, + nor Increment/Decrement. This also means that groupcache.... + + * ... supports automatic mirroring of super-hot items to multiple + processes. This prevents memcached hot spotting where a machine's + CPU and/or NIC are overloaded by very popular keys/values. + + * is currently only available for Go. It's very unlikely that I + (bradfitz@) will port the code to any other language. + +## Loading process + +In a nutshell, a groupcache lookup of **Get("foo")** looks like: + +(On machine #5 of a set of N machines running the same code) + + 1. Is the value of "foo" in local memory because it's super hot? If so, use it. + + 2. Is the value of "foo" in local memory because peer #5 (the current + peer) is the owner of it? If so, use it. + + 3. Amongst all the peers in my set of N, am I the owner of the key + "foo"? (e.g. does it consistent hash to 5?) If so, load it. If + other callers come in, via the same process or via RPC requests + from peers, they block waiting for the load to finish and get the + same answer. If not, RPC to the peer that's the owner and get + the answer. If the RPC fails, just load it locally (still with + local dup suppression). + +## Users + +groupcache is in production use by dl.google.com (its original user), +parts of Blogger, parts of Google Code, parts of Google Fiber, parts +of Google production monitoring systems, etc. + +## Presentations + +See http://talks.golang.org/2013/oscon-dl.slide + +## Help + +Use the golang-nuts mailing list for any discussion or questions. diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore new file mode 100644 index 00000000..c7dd4058 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -0,0 +1,17 @@ +.DS_Store +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +core +_obj +_test +_testmain.go + +# Conformance test output and transient files. +conformance/failing_tests.txt diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml new file mode 100644 index 00000000..455fa660 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go +go: +- 1.6.x +- 1.10.x +- 1.x + +install: + - go get -v -d -t github.com/golang/protobuf/... + - curl -L https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-linux-x86_64.zip -o /tmp/protoc.zip + - unzip /tmp/protoc.zip -d "$HOME"/protoc + - mkdir -p "$HOME"/src && ln -s "$HOME"/protoc "$HOME"/src/protobuf + +env: + - PATH=$HOME/protoc/bin:$PATH + +script: + - make all + - make regenerate + # TODO(tamird): When https://github.com/travis-ci/gimme/pull/130 is + # released, make this look for "1.x". 
+ - if [[ "$TRAVIS_GO_VERSION" == 1.10* ]]; then + if [[ "$(git status --porcelain 2>&1)" != "" ]]; then + git status >&2; + git diff -a >&2; + exit 1; + fi; + echo "git status is clean."; + fi; + - make test diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 0f646931..1b1b1921 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,4 +1,7 @@ +Go support for Protocol Buffers - Google's data interchange format + Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile new file mode 100644 index 00000000..2bc2621a --- /dev/null +++ b/vendor/github.com/golang/protobuf/Makefile @@ -0,0 +1,48 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: install + +install: + go install ./proto ./jsonpb ./ptypes ./protoc-gen-go + +test: + go test ./... ./protoc-gen-go/testdata + make -C conformance test + +clean: + go clean ./... + +nuke: + go clean -i ./... + +regenerate: + ./regenerate.sh diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md new file mode 100644 index 00000000..01b29daf --- /dev/null +++ b/vendor/github.com/golang/protobuf/README.md @@ -0,0 +1,283 @@ +# Go support for Protocol Buffers + +[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) +[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf) + +Google's data interchange format. +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.6. 
+ +This software implements Go bindings for protocol buffers. For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --go_out +parameter set to the directory you want to output the Go code to. + + protoc --go_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + +## Packages and input paths ## + +The protocol buffer language has a concept of "packages" which does not +correspond well to the Go notion of packages. In generated Go code, +each source `.proto` file is associated with a single Go package. The +name and import path for this package is specified with the `go_package` +proto option: + + option go_package = "github.com/golang/protobuf/ptypes/any"; + +The protocol buffer compiler will attempt to derive a package name and +import path if a `go_package` option is not present, but it is +best to always specify one explicitly. + +There is a one-to-one relationship between source `.proto` files and +generated `.pb.go` files, but any number of `.pb.go` files may be +contained in the same Go package. + +The output name of a generated file is produced by replacing the +`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`). +However, the output directory is selected in one of two ways. Let +us say we have `inputs/x.proto` with a `go_package` option of +`github.com/golang/protobuf/p`. The corresponding output file may +be: + +- Relative to the import path: + + protoc --go_out=. inputs/x.proto + # writes ./github.com/golang/protobuf/p/x.pb.go + + (This can work well with `--go_out=$GOPATH`.) + +- Relative to the input file: + + protoc --go_out=paths=source_relative:. inputs/x.proto + # generate ./inputs/x.pb.go + +## Generated code ## + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +The proto package converts data structures to and from the +wire format of protocol buffers. 
It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+    them as structure fields.
+  - There are getters that return a field's value if set,
+    and return the field's default value if unset.
+    The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+    All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+    That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+    Helpers for getting values are superseded by the
+    GetFoo methods and their use is deprecated.
+        msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+    have them. They have the form Default_StructName_FieldName.
+    Because the getter methods handle defaulted values,
+    direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+    Enum values are prefixed with the enum's type name. Enum types have
+    a String method, and an Enum method to assist in message construction.
+  - Nested groups and enums have type names prefixed with the name of
+    the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+    followed by an underscore-delimited list of the nested messages
+    that contain it (if any) followed by the CamelCased name of the
+    extension field itself. HasExtension, ClearExtension, GetExtension
+    and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+    with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+	syntax = "proto2";
+	package example;
+
+	enum FOO { X = 17; };
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	}
+```
+
+To create and play with a Test object from the example package,
+
+```go
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		"path/to/example"
+	)
+
+	func main() {
+		test := &example.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &example.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &example.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// etc.
+	}
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+	protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+- `paths=(import | source_relative)` - specifies how the paths of
+  generated files are structured. See the "Packages and input paths"
+  section above. The default is `import`.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+  load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+  associated with Go package quux/shme. This is subject to the
+  import_prefix parameter.
+
+The following parameters are deprecated and should not be used:
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+  all imports.
+- `import_path=foo/bar` - used as the package if no input files
+  declare `go_package`. If it contains slashes, everything up to the
+  rightmost slash is ignored.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+	protoc --go_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+  light whose resolution requires breaking compatibility. We reserve the right
+  to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+  specification that are undefined. Programs that depend on such unspecified
+  behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+  inconsistency, incompleteness, or change in the Protocol Buffers
+  specification, resolving the issue could affect the meaning or legality of
+  existing programs. We reserve the right to address such issues, including
+  updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+  that depends on the buggy behavior may break if the bug is fixed. We reserve
+  the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+  names that already exist in a schema, causing applications to break. When the
+  code generator encounters a field in the schema that would collide with a
+  generated field or method name, the code generator will append an underscore
+  to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+  start with `XXX`. These parts of the generated code are exported out of
+  necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 3abfed2c..c27d35f8 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -37,9 +37,27 @@ package proto import ( "errors" + "fmt" "reflect" ) +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 75565cc6..0e2191b8 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,66 +274,7 @@ import ( "sync" ) -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. 
-func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") // Message is implemented by generated protocol buffer messages. type Message interface { diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 50b99b83..f710adab 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -139,7 +139,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field + proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field Default string // default value @@ -148,9 +148,9 @@ type Properties struct { stype reflect.Type // set for struct types only sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. @@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc case reflect.Map: p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } if p.stype != nil { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index b1679449..0f212b30 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -231,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte return b, err } - var err, errLater error + var err, errreq error // The old marshaler encodes extensions at beginning. 
if u.extensions.IsValid() { e := ptr.offset(u.extensions).toExtensions() @@ -252,13 +252,11 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte } } for _, f := range u.fields { - if f.required { + if f.required && errreq == nil { if ptr.offset(f.field).getPointer().isNil() { // Required field is not set. // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } + errreq = &RequiredNotSetError{f.name} continue } } @@ -271,21 +269,14 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte if err1, ok := err.(*RequiredNotSetError); ok { // Required field in submessage is not set. // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} } continue } if err == errRepeatedHasNil { err = errors.New("proto: repeated field " + f.name + " has nil element") } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } return b, err } } @@ -293,7 +284,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte s := *ptr.offset(u.unrecognized).toBytes() b = append(b, s...) } - return b, errLater + return b, errreq } // computeMarshalInfo initializes the marshal info. @@ -539,7 +530,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma packed := false proto3 := false - validateUTF8 := true for i := 2; i < len(tags); i++ { if tags[i] == "packed" { packed = true @@ -548,7 +538,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma proto3 = true } } - validateUTF8 = validateUTF8 && proto3 switch t.Kind() { case reflect.Bool: @@ -746,18 +735,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma } return sizeFloat64Value, appendFloat64Value case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } if pointer { return sizeStringPtr, appendStringPtr } @@ -2007,6 +1984,9 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt } func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) @@ -2017,6 +1997,9 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b if v == "" { return b, nil } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) @@ -2028,83 +2011,24 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err return b, nil } v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) 
return b, nil } func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool s := *ptr.toStringSlice() for _, v := range s { if !utf8.ValidString(v) { - invalidUTF8 = true + return nil, errInvalidUTF8 } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) } - if invalidUTF8 { - return b, errInvalidUTF8 - } return b, nil } func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { @@ -2183,8 +2107,7 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err error - var nerr nonFatal + var err, errreq error for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2192,14 +2115,22 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, wiretag) // start group b, err = u.marshal(b, v, deterministic) b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, nerr.E + return b, errreq } } @@ -2243,8 +2174,7 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err error - var nerr nonFatal + var err, errreq error for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2254,14 +2184,22 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, uint64(siz)) b, err = u.marshal(b, v, deterministic) - if !nerr.Merge(err) { + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, nerr.E + return b, errreq } } @@ -2285,25 +2223,6 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { // value. // Key cannot be pointer-typed. valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } return func(ptr pointer, tagsize int) int { m := ptr.asPointerTo(t).Elem() // the map n := 0 @@ -2324,26 +2243,24 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { if len(keys) > 1 && deterministic { sort.Sort(mapKeys(keys)) } - - var nerr nonFatal for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() kaddr := toAddrPointer(&ki, false) // pointer to key vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + if err != nil && err != ErrNil { // allow nil value in map return b, err } } - return b, nerr.E + return b, nil } } @@ -2416,7 +2333,6 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error - var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2436,11 +2352,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // Sort the keys to provide a deterministic encoding. @@ -2467,11 +2383,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // message set format is: @@ -2528,7 +2444,6 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error - var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. 
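The table_marshal.go hunks above revert the marshaler from the deferred "non-fatal" error model (the removed nonFatal/errLater machinery) to the older scheme: a missing required field is still recorded in errreq so that encoding runs to completion, but an invalid UTF-8 string is now a fatal error that aborts marshaling immediately with errInvalidUTF8. A minimal sketch of the observable difference, assuming a hypothetical generated message type Msg with a proto3 string field Name (Msg is not defined in this diff):

    m := &Msg{Name: string([]byte{0xff, 0xfe, 0xfd})} // not valid UTF-8
    if _, err := proto.Marshal(m); err != nil {
        fmt.Println(err) // with this change: "proto: invalid UTF-8 string", and encoding aborts
        // with the removed nonFatal collector, marshaling ran to completion and
        // the UTF-8 problem was reported alongside the full encoding
    }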
@@ -2555,12 +2470,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } b = append(b, 1<<3|WireEndGroup) } - return b, nerr.E + return b, nil } // Sort the keys to provide a deterministic encoding. @@ -2594,11 +2509,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // sizeV1Extensions computes the size of encoded data for a V1-API extension field. @@ -2641,7 +2556,6 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ sort.Ints(keys) var err error - var nerr nonFatal for _, k := range keys { e := m[int32(k)] if e.value == nil || e.desc == nil { @@ -2658,11 +2572,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // newMarshaler is the interface representing objects that can marshal themselves. diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index ebf1caa5..55f0340a 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -97,8 +97,6 @@ type unmarshalFieldInfo struct { // if a required field, contains a single set bit at this field's index in the required field list. reqMask uint64 - - name string // name of the field, for error reporting } var ( @@ -138,8 +136,8 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { if u.isMessageSet { return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } - var reqMask uint64 // bitmask of required fields we've seen. - var errLater error + var reqMask uint64 // bitmask of required fields we've seen. + var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage. for len(b) > 0 { // Read tag and wire type. // Special case 1 and 2 byte varints. @@ -178,20 +176,11 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { if r, ok := err.(*RequiredNotSetError); ok { // Remember this error, but keep parsing. We need to produce // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } + rnse = r reqMask |= f.reqMask continue } if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } return err } // Fragments with bad wire type are treated as unknown fields. @@ -250,16 +239,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { emap[int32(tag)] = e } } - if reqMask != u.reqMask && errLater == nil { + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { // A required field of this message is missing. 
 		for _, n := range u.reqFields {
 			if reqMask&1 == 0 {
-				errLater = &RequiredNotSetError{n}
+				return &RequiredNotSetError{n}
 			}
 			reqMask >>= 1
 		}
 	}
-	return errLater
+	return nil
 }
 
 // computeUnmarshalInfo fills in u with information for use
@@ -358,7 +351,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 		}
 
 		// Store the info in the correct slot in the message.
-		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+		u.setTag(tag, toField(&f), unmarshal, reqMask)
 	}
 
 	// Find any types associated with oneof fields.
@@ -373,17 +366,10 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 			f := typ.Field(0) // oneof implementers have one field
 			baseUnmarshal := fieldUnmarshaler(&f)
-			tags := strings.Split(f.Tag.Get("protobuf"), ",")
-			fieldNum, err := strconv.Atoi(tags[1])
+			tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+			tag, err := strconv.Atoi(tagstr)
 			if err != nil {
-				panic("protobuf tag field not an integer: " + tags[1])
-			}
-			var name string
-			for _, tag := range tags {
-				if strings.HasPrefix(tag, "name=") {
-					name = strings.TrimPrefix(tag, "name=")
-					break
-				}
+				panic("protobuf tag field not an integer: " + tagstr)
 			}
 
 			// Find the oneof field that this struct implements.
@@ -394,7 +380,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 				// That lets us know where this struct should be stored
 				// when we encounter it during unmarshaling.
 				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
-				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				u.setTag(tag, of.field, unmarshal, 0)
 			}
 		}
 	}
@@ -415,7 +401,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
 	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
 		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
-	}, 0, "")
+	}, 0)
 
 	// Set mask for required field check.
 	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
 
 	atomic.StoreInt32(&u.initialized, 1)
 }
 
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
-	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
 	n := u.typ.NumField()
 	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
for len(u.dense) <= tag { @@ -457,17 +442,11 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { tagArray := strings.Split(tags, ",") encoding := tagArray[0] name := "unknown" - proto3 := false - validateUTF8 := true for _, tag := range tagArray[3:] { if strings.HasPrefix(tag, "name=") { name = tag[5:] } - if tag == "proto3" { - proto3 = true - } } - validateUTF8 = validateUTF8 && proto3 // Figure out packaging (pointer, slice, or both) slice := false @@ -615,15 +594,6 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { } return unmarshalBytesValue case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } if pointer { return unmarshalStringPtr } @@ -1478,6 +1448,9 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } *f.toString() = v return b[x:], nil } @@ -1495,6 +1468,9 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } *f.toStringPtr() = &v return b[x:], nil } @@ -1512,72 +1488,14 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } s := f.toStringSlice() *s = append(*s, v) return b[x:], nil } -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - var emptyBuf [0]byte func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { @@ -1756,7 +1674,6 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Maps will be somewhat slow. Oh well. // Read key and value from data. - var nerr nonFatal k := reflect.New(kt) v := reflect.New(vt) for len(b) > 0 { @@ -1777,7 +1694,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { err = errInternalBadWireType // skip unknown tag } - if nerr.Merge(err) { + if err == nil { continue } if err != errInternalBadWireType { @@ -1800,7 +1717,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Insert into map. 
m.SetMapIndex(k.Elem(), v.Elem()) - return r, nerr.E + return r, nil } } @@ -1826,16 +1743,15 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal // Unmarshal data into holder. // We unmarshal into the first field of the holder object. var err error - var nerr nonFatal b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { + if err != nil { return nil, err } // Write pointer to holder into target field. f.asPointerTo(ityp).Elem().Set(v) - return b, nerr.E + return b, nil } } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 1aaee725..2205fdaa 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index bb55a3af..0685bae3 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.MapKeyProp); err != nil { + if err := p.readAny(key, props.mkeyprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.MapValProp); err != nil { + if err := p.readAny(val, props.mvalprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8f..b2af97f4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,12 +130,10 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. 
- if any == nil { + aname, err := AnyMessageName(any) + if err != nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + + return aname == proto.MessageName(pb) } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index e3c56d3f..f67edc7d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -121,7 +121,7 @@ type Any struct { // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index a7beb2c4..4d75473b 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -82,14 +82,14 @@ type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 8e76ae97..e9c22228 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -100,12 +100,12 @@ type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` diff --git a/vendor/github.com/golang/protobuf/regenerate.sh b/vendor/github.com/golang/protobuf/regenerate.sh new file mode 100755 index 00000000..dc7e2d1f --- /dev/null +++ b/vendor/github.com/golang/protobuf/regenerate.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e + +# Install the working tree's protoc-gen-gen in a tempdir. +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT +mkdir -p $tmpdir/bin +PATH=$tmpdir/bin:$PATH +GOBIN=$tmpdir/bin go install ./protoc-gen-go + +# Public imports require at least Go 1.9. +supportTypeAliases="" +if go list -f '{{context.ReleaseTags}}' runtime | grep -q go1.9; then + supportTypeAliases=1 +fi + +# Generate various test protos. +PROTO_DIRS=( + conformance/internal/conformance_proto + jsonpb/jsonpb_test_proto + proto + protoc-gen-go/testdata +) +for dir in ${PROTO_DIRS[@]}; do + for p in `find $dir -name "*.proto"`; do + if [[ $p == */import_public/* && ! $supportTypeAliases ]]; then + echo "# $p (skipped)" + continue; + fi + echo "# $p" + protoc -I$dir --go_out=plugins=grpc,paths=source_relative:$dir $p + done +done + +# Deriving the location of the source protos from the path to the +# protoc binary may be a bit odd, but this is what protoc itself does. +PROTO_INCLUDE=$(dirname $(dirname $(which protoc)))/include + +# Well-known types. +WKT_PROTOS=(any duration empty struct timestamp wrappers) +for p in ${WKT_PROTOS[@]}; do + echo "# google/protobuf/$p.proto" + protoc --go_out=paths=source_relative:$tmpdir google/protobuf/$p.proto + cp $tmpdir/google/protobuf/$p.pb.go ptypes/$p + cp $PROTO_INCLUDE/google/protobuf/$p.proto ptypes/$p +done + +# descriptor.proto. +echo "# google/protobuf/descriptor.proto" +protoc --go_out=paths=source_relative:$tmpdir google/protobuf/descriptor.proto +cp $tmpdir/google/protobuf/descriptor.pb.go protoc-gen-go/descriptor +cp $PROTO_INCLUDE/google/protobuf/descriptor.proto protoc-gen-go/descriptor diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/google/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/k8s.io/kube-openapi/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 00000000..6062a4da --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. 
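The README above introduces the vendored btree package whose implementation follows. As a quick orientation to its public API (New, ReplaceOrInsert, Get, Delete, Len, the Ascend* iterators, and the built-in Int item type, all defined in btree.go below), here is a minimal sketch; it is not part of the vendored sources:

    package main

    import (
    	"fmt"

    	"github.com/google/btree"
    )

    func main() {
    	tr := btree.New(32) // a B-Tree of degree 32
    	for i := 0; i < 10; i++ {
    		tr.ReplaceOrInsert(btree.Int(i)) // returns nil for fresh inserts
    	}
    	fmt.Println(tr.Len())             // 10
    	fmt.Println(tr.Get(btree.Int(3))) // 3
    	tr.AscendRange(btree.Int(2), btree.Int(5), func(i btree.Item) bool {
    		fmt.Println(i) // visits 2, 3, 4 in order
    		return true    // returning false would stop the iteration
    	})
    	fmt.Println(tr.Delete(btree.Int(9))) // 9; nil if the item were absent
    }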
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 00000000..fc5aaaa1 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,649 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implmentation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values or backwards iteration. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are not safe for concurrent write access. 
+type FreeList struct { + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + index := len(f.freelist) - 1 + if index < 0 { + return new(node) + } + f.freelist, n = f.freelist[:index], f.freelist[index] + return +} + +func (f *FreeList) freeNode(n *node) { + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + freelist: f, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + t *BTree +} + +// split splits the given node at the given index. 
The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.t.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items = n.items[:i] + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children = n.children[:i+1] + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.children[i] + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.children[i].insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + child := n.children[i] + if len(child.items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. 
+ if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + stealFrom := n.children[i-1] + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + stealFrom := n.children[i+1] + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + child = n.children[i] + } + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.t.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +// iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. +// +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. 
+func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue + } + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false + } + if !to(item) { + return false + } + if !iter(item) { + return false + } + } + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + freelist *FreeList +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t + return +} + +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC + } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go new file mode 100644 index 00000000..cb95b7fa --- /dev/null +++ b/vendor/github.com/google/btree/btree_mem.go @@ -0,0 +1,76 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// This binary compares memory usage between btree and gollrb. +package main + +import ( + "flag" + "fmt" + "math/rand" + "runtime" + "time" + + "github.com/google/btree" + "github.com/petar/GoLLRB/llrb" +) + +var ( + size = flag.Int("size", 1000000, "size of the tree to build") + degree = flag.Int("degree", 8, "degree of btree") + gollrb = flag.Bool("llrb", false, "use llrb instead of btree") +) + +func main() { + flag.Parse() + vals := rand.Perm(*size) + var t, v interface{} + v = vals + var stats runtime.MemStats + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- BEFORE ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + start := time.Now() + if *gollrb { + tr := llrb.New() + for _, v := range vals { + tr.ReplaceOrInsert(llrb.Int(v)) + } + t = tr // keep it around + } else { + tr := btree.New(*degree) + for _, v := range vals { + tr.ReplaceOrInsert(btree.Int(v)) + } + t = tr // keep it around + } + fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) + fmt.Println("-------- AFTER ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- AFTER GC ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + if t == v { + fmt.Println("to make sure vals and tree aren't GC'd") + } +} diff --git a/vendor/github.com/googleapis/gnostic/.gitignore b/vendor/github.com/googleapis/gnostic/.gitignore new file mode 100644 index 00000000..9db23e56 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.gitignore @@ -0,0 +1,13 @@ +# Eclipse +.checkstyle +.project +.settings +# Swift +.build +Packages +# vi +*.swp +# vscode +.vscode +.DS_Store +*~ diff --git a/vendor/github.com/googleapis/gnostic/.travis-install.sh b/vendor/github.com/googleapis/gnostic/.travis-install.sh new file mode 100755 index 00000000..6ae5332c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.travis-install.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# +# Install dependencies that aren't available as Ubuntu packages. +# +# Everything goes into $HOME/local. 
+# +# Scripts should add +# - $HOME/local/bin to PATH +# - $HOME/local/lib to LD_LIBRARY_PATH +# + +cd +mkdir -p local + +# Install swift +SWIFT_BRANCH=swift-3.0.2-release +SWIFT_VERSION=swift-3.0.2-RELEASE +SWIFT_PLATFORM=ubuntu14.04 +SWIFT_URL=https://swift.org/builds/$SWIFT_BRANCH/$(echo "$SWIFT_PLATFORM" | tr -d .)/$SWIFT_VERSION/$SWIFT_VERSION-$SWIFT_PLATFORM.tar.gz + +echo $SWIFT_URL + +curl -fSsL $SWIFT_URL -o swift.tar.gz +tar -xzf swift.tar.gz --strip-components=2 --directory=local + +# Install protoc +PROTOC_URL=https://github.com/google/protobuf/releases/download/v3.2.0rc2/protoc-3.2.0rc2-linux-x86_64.zip + +echo $PROTOC_URL + +curl -fSsL $PROTOC_URL -o protoc.zip +unzip protoc.zip -d local + +# Verify installation +find local diff --git a/vendor/github.com/googleapis/gnostic/.travis.yml b/vendor/github.com/googleapis/gnostic/.travis.yml new file mode 100644 index 00000000..35c7bfe3 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.travis.yml @@ -0,0 +1,43 @@ +# Travis CI build file for OpenAPI Compiler, including Go and Swift plugins + +# Use Ubuntu 14.04 +dist: trusty + +sudo: false + +language: go + +addons: + apt: + packages: + - clang-3.8 + - lldb-3.8 + - libicu-dev + - libtool + - libcurl4-openssl-dev + - libbsd-dev + - build-essential + - libssl-dev + - uuid-dev + - curl + - unzip + +install: + - ./.travis-install.sh + - export PATH=.:$HOME/local/bin:$PATH + - make + +script: + - go test . -v + - pushd plugins/gnostic-go-generator/examples/v2.0/bookstore + - make test + - popd + - export PATH=.:$HOME/local/bin:$PATH + - export LD_LIBRARY_PATH=$HOME/local/lib + - pushd plugins/gnostic-swift-generator + - make + - cd examples/bookstore + - make + - .build/debug/Server & + - make test + diff --git a/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh b/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh new file mode 100755 index 00000000..e771c6e1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ +OpenAPIv2/OpenAPIv2.proto + +protoc \ +--go_out=:. \ +plugins/plugin.proto + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ +OpenAPIv3/OpenAPIv3.proto + diff --git a/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md b/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md new file mode 100644 index 00000000..6736efd9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# How to become a contributor and submit your own code + +## Contributor License Agreements + +We'd love to accept your sample apps and patches! Before we can take them, we +have to jump a couple of legal hurdles. + +Please fill out either the individual or corporate Contributor License Agreement +(CLA). 
+ + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual CLA] + (https://developers.google.com/open-source/cla/individual). + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA] + (https://developers.google.com/open-source/cla/corporate). + +Follow either of the two links above to access the appropriate CLA and +instructions for how to sign and return it. Once we receive it, we'll be able to +accept your pull requests. + +## Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The repo owner will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a + Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Ensure that your code adheres to the existing style in the sample to which + you are contributing. Refer to the + [Google Cloud Platform Samples Style Guide] + (https://github.com/GoogleCloudPlatform/Template/wiki/style.html) for the + recommended coding standards for this organization. +1. Ensure that your code has an appropriate set of unit tests which all pass. +1. Submit a pull request. diff --git a/vendor/github.com/googleapis/gnostic/Makefile b/vendor/github.com/googleapis/gnostic/Makefile new file mode 100644 index 00000000..13726ee4 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/Makefile @@ -0,0 +1,15 @@ + +build: + go get + go install + cd generate-gnostic; go get; go install + cd apps/report; go get; go install + cd apps/petstore-builder; go get; go install + cd plugins/gnostic-go-sample; go get; go install + cd plugins/gnostic-go-generator/encode-templates; go get; go install + cd plugins/gnostic-go-generator; go get; go install + rm -f $(GOPATH)/bin/gnostic-go-client $(GOPATH)/bin/gnostic-go-server + ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-client + ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-server + cd extensions/sample; make + diff --git a/vendor/github.com/googleapis/gnostic/README.md b/vendor/github.com/googleapis/gnostic/README.md new file mode 100644 index 00000000..d350f3f0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/README.md @@ -0,0 +1,103 @@ +[![Build Status](https://travis-ci.org/googleapis/gnostic.svg?branch=master)](https://travis-ci.org/googleapis/gnostic) + +# ⨁ gnostic + +This repository contains a Go command line tool which converts +JSON and YAML [OpenAPI](https://github.com/OAI/OpenAPI-Specification) +descriptions to and from equivalent Protocol Buffer representations. + +[Protocol Buffers](https://developers.google.com/protocol-buffers/) +provide a language-neutral, platform-neutral, extensible mechanism +for serializing structured data. +**gnostic**'s Protocol Buffer models for the OpenAPI Specification +can be used to generate code that includes data structures with +explicit fields for the elements of an OpenAPI description. +This makes it possible for developers to work with OpenAPI +descriptions in type-safe ways, which is particularly useful +in strongly-typed languages like Go and Swift. + +**gnostic** reads OpenAPI descriptions into +these generated data structures, reports errors, +resolves internal dependencies, and writes the results +in a binary form that can be used in any language that is +supported by the Protocol Buffer tools. 
+A plugin interface simplifies integration with API +tools written in a variety of different languages, +and when necessary, Protocol Buffer OpenAPI descriptions +can be reexported as JSON or YAML. + +**gnostic** compilation code and OpenAPI Protocol Buffer +models are automatically generated from an +[OpenAPI JSON Schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json). +Source code for the generator is in the [generate-gnostic](generate-gnostic) directory. + +## Disclaimer + +This is prerelease software and work in progress. Feedback and +contributions are welcome, but we currently make no guarantees of +function or stability. + +## Requirements + +**gnostic** can be run in any environment that supports [Go](http://golang.org) +and the [Google Protocol Buffer Compiler](https://github.com/google/protobuf). + +## Installation + +1. Get this package by downloading it with `go get`. + + go get github.com/googleapis/gnostic + +2. [Optional] Build and run the compiler generator. +This uses the OpenAPI JSON schema to generate a Protocol Buffer language file +that describes the OpenAPI specification and a Go-language file of code that +will read a JSON or YAML OpenAPI representation into the generated protocol +buffers. Pre-generated versions of these files are in the OpenAPIv2 directory. + + cd $GOPATH/src/github.com/googleapis/gnostic/generate-gnostic + go install + cd .. + generate-gnostic --v2 + +3. [Optional] Generate Protocol Buffer support code. +A pre-generated version of this file is checked into the OpenAPIv2 directory. +This step requires a local installation of protoc, the Protocol Buffer Compiler. +You can get protoc [here](https://github.com/google/protobuf). + + ./COMPILE-PROTOS.sh + +4. [Optional] Rebuild **gnostic**. This is only necessary if you've performed steps +2 or 3 above. + + go install github.com/googleapis/gnostic + +5. Run **gnostic**. This will create a file in the current directory named "petstore.pb" that contains a binary +Protocol Buffer description of a sample API. + + gnostic --pb-out=. examples/petstore.json + +6. You can also compile files that you specify with a URL. Here's another way to compile the previous +example. This time we're creating "petstore.text", which contains a textual representation of the +Protocol Buffer description. This is mainly for use in testing and debugging. + + gnostic --text-out=petstore.text https://raw.githubusercontent.com/googleapis/gnostic/master/examples/petstore.json + +7. For a sample application, see apps/report. + + go install github.com/googleapis/gnostic/apps/report + report petstore.pb + +8. **gnostic** supports plugins. This builds and runs a sample plugin +that reports some basic information about an API. The "-" causes the plugin to +write its output to stdout. + + go install github.com/googleapis/gnostic/plugins/gnostic-go-sample + gnostic examples/petstore.json --go-sample-out=- + +## Copyright + +Copyright 2017, Google Inc. + +## License + +Released under the Apache 2.0 license. 
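Step 5 above leaves a binary petstore.pb in the working directory. A sketch of reading it back from Go with the generated OpenAPIv2 model, in the spirit of the apps/report sample; the openapi_v2 import path and the Document/Info field names are assumptions based on the generated code, not taken from this change:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"

    	"github.com/golang/protobuf/proto"
    	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" // assumed generated package
    )

    func main() {
    	data, err := ioutil.ReadFile("petstore.pb")
    	if err != nil {
    		log.Fatal(err)
    	}
    	document := &openapi_v2.Document{}
    	if err := proto.Unmarshal(data, document); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(document.Info.Title, document.Info.Version)
    }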
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml new file mode 100644 index 00000000..2bca4c59 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000..81316beb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 00000000..61bd830e --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,24 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. +- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. 
+- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 00000000..42e3129d --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 00000000..8239edc2 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,553 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). +// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. 
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	return req.URL.String()
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+	mu    sync.RWMutex
+	items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests allowing servers to return 304 / Not Modified
+type Transport struct {
+	// The RoundTripper interface actually used to make requests
+	// If nil, http.DefaultTransport is used
+	Transport http.RoundTripper
+	Cache     Cache
+	// If true, responses returned from the cache will be given an extra header, X-From-Cache
+	MarkCachedResponses bool
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+	return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
+func (t *Transport) Client() *http.Client {
+	return &http.Client{Transport: t}
+}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
+// RoundTrip takes a Request and returns a Response
+//
+// If there is a fresh Response already in cache, then it will be returned without connecting to
+// the server.
+//
+// If there is a stale Response, then any validators it contains will be set on the new request
+// to give the server a chance to respond with NotModified. If this happens, then the cached Response
+// will be returned.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. 
+			resp.Body = &cachingReadCloser{
+				R: resp.Body,
+				OnEOF: func(r io.Reader) {
+					resp := *resp
+					resp.Body = ioutil.NopCloser(r)
+					respBytes, err := httputil.DumpResponse(&resp, true)
+					if err == nil {
+						t.Cache.Set(cacheKey, respBytes)
+					}
+				},
+			}
+		default:
+			respBytes, err := httputil.DumpResponse(resp, true)
+			if err == nil {
+				t.Cache.Set(cacheKey, respBytes)
+			}
+		}
+	} else {
+		t.Cache.Delete(cacheKey)
+	}
+	return resp, nil
+}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, s-maxage isn't used.
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		// the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+ // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. + if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. 
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml new file mode 100644 index 00000000..5db25803 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.5 + - 1.6 + - tip + +script: make -f Makefile.TRAVIS diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS b/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS new file mode 100644 index 00000000..24f9649e --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS @@ -0,0 +1,15 @@ +all: build cover test vet + +build: + go build -v ./... + +cover: test + $(MAKE) -C pbutil cover + +test: build + go test -v ./... + +vet: build + go vet -v ./... + +.PHONY: build cover test vet diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md new file mode 100644 index 00000000..751ee696 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. + +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. 
+ +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 00000000..41ce7f16 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md new file mode 100644 index 00000000..3474739e --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/README.md @@ -0,0 +1,141 @@ +# What is diskv? + +Diskv (disk-vee) is a simple, persistent key-value store written in the Go +language. It starts with an incredibly simple API for storing arbitrary data on +a filesystem by key, and builds several layers of performance-enhancing +abstraction on top. The end result is a conceptually simple, but highly +performant, disk-backed storage system. + +[![Build Status][1]][2] + +[1]: https://drone.io/github.com/peterbourgon/diskv/status.png +[2]: https://drone.io/github.com/peterbourgon/diskv/latest + + +# Installing + +Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. +Then, + +```bash +$ go get github.com/peterbourgon/diskv +``` + +[3]: http://golang.org +[4]: http://golang.org/doc/install/source +[5]: http://golang.org/doc/install + + +# Usage + +```go +package main + +import ( + "fmt" + "github.com/peterbourgon/diskv" +) + +func main() { + // Simplest transform function: put all the data files into the base dir. + flatTransform := func(s string) []string { return []string{} } + + // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. + d := diskv.New(diskv.Options{ + BasePath: "my-data-dir", + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }) + + // Write three bytes to the key "alpha". + key := "alpha" + d.Write(key, []byte{'1', '2', '3'}) + + // Read the value back out of the store. + value, _ := d.Read(key) + fmt.Printf("%v\n", value) + + // Erase the key+value from the store (and the disk). 
+ d.Erase(key) +} +``` + +More complex examples can be found in the "examples" subdirectory. + + +# Theory + +## Basic idea + +At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). +The data is written to a single file on disk, with the same name as the key. +The key determines where that file will be stored, via a user-provided +`TransformFunc`, which takes a key and returns a slice (`[]string`) +corresponding to a path list where the key file will be stored. The simplest +TransformFunc, + +```go +func SimpleTransform (key string) []string { + return []string{} +} +``` + +will place all keys in the same, base directory. The design is inspired by +[Redis diskstore][6]; a TransformFunc which emulates the default diskstore +behavior is available in the content-addressable-storage example. + +[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 + +**Note** that your TransformFunc should ensure that one valid key doesn't +transform to a subset of another valid key. That is, it shouldn't be possible +to construct valid keys that resolve to directory names. As a concrete example, +if your TransformFunc splits on every 3 characters, then + +```go +d.Write("abcabc", val) // OK: written to /abc/abc/abcabc +d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory +``` + +This will be addressed in an upcoming version of diskv. + +Probably the most important design principle behind diskv is that your data is +always flatly available on the disk. diskv will never do anything that would +prevent you from accessing, copying, backing up, or otherwise interacting with +your data via common UNIX commandline tools. + +## Adding a cache + +An in-memory caching layer is provided by combining the BasicStore +functionality with a simple map structure, and keeping it up-to-date as +appropriate. Since the map structure in Go is not threadsafe, it's combined +with a RWMutex to provide safe concurrent access. + +## Adding order + +diskv is a key-value store and therefore inherently unordered. An ordering +system can be injected into the store by passing something which satisfies the +diskv.Index interface. (A default implementation, using Google's +[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a +user-provided Less function) index of the keys, which can be queried. + +[7]: https://github.com/google/btree + +## Adding compression + +Something which implements the diskv.Compression interface may be passed +during store creation, so that all Writes and Reads are filtered through +a compression/decompression pipeline. Several default implementations, +using stdlib compression algorithms, are provided. Note that data is cached +compressed; the cost of decompression is borne with each Read. + +## Streaming + +diskv also now provides ReadStream and WriteStream methods, to allow very large +data to be handled efficiently. + + +# Future plans + + * Needs plenty of robust testing: huge datasets, etc... + * More thorough benchmarking + * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go new file mode 100644 index 00000000..5192b027 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/compression.go @@ -0,0 +1,64 @@ +package diskv + +import ( + "compress/flate" + "compress/gzip" + "compress/zlib" + "io" +) + +// Compression is an interface that Diskv uses to implement compression of +// data. 
Writer takes a destination io.Writer and returns a WriteCloser that +// compresses all data written through it. Reader takes a source io.Reader and +// returns a ReadCloser that decompresses all data read through it. You may +// define these methods on your own type, or use one of the NewCompression +// helpers. +type Compression interface { + Writer(dst io.Writer) (io.WriteCloser, error) + Reader(src io.Reader) (io.ReadCloser, error) +} + +// NewGzipCompression returns a Gzip-based Compression. +func NewGzipCompression() Compression { + return NewGzipCompressionLevel(flate.DefaultCompression) +} + +// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. +func NewGzipCompressionLevel(level int) Compression { + return &genericCompression{ + wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, + rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, + } +} + +// NewZlibCompression returns a Zlib-based Compression. +func NewZlibCompression() Compression { + return NewZlibCompressionLevel(flate.DefaultCompression) +} + +// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. +func NewZlibCompressionLevel(level int) Compression { + return NewZlibCompressionLevelDict(level, nil) +} + +// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given +// level, based on the given dictionary. +func NewZlibCompressionLevelDict(level int, dict []byte) Compression { + return &genericCompression{ + func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, + func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, + } +} + +type genericCompression struct { + wf func(w io.Writer) (io.WriteCloser, error) + rf func(r io.Reader) (io.ReadCloser, error) +} + +func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { + return g.wf(dst) +} + +func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { + return g.rf(src) +} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go new file mode 100644 index 00000000..524dc0a6 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/diskv.go @@ -0,0 +1,624 @@ +// Diskv (disk-vee) is a simple, persistent, key-value store. +// It stores all data flatly on the filesystem. + +package diskv + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" +) + +const ( + defaultBasePath = "diskv" + defaultFilePerm os.FileMode = 0666 + defaultPathPerm os.FileMode = 0777 +) + +var ( + defaultTransform = func(s string) []string { return []string{} } + errCanceled = errors.New("canceled") + errEmptyKey = errors.New("empty key") + errBadKey = errors.New("bad key") + errImportDirectory = errors.New("can't import a directory") +) + +// TransformFunction transforms a key into a slice of strings, with each +// element in the slice representing a directory in the file path where the +// key's entry will eventually be stored. +// +// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], +// the final location of the data file will be /ab/cde/f/abcdef +type TransformFunction func(s string) []string + +// Options define a set of properties that dictate Diskv behavior. +// All values are optional. 
+type Options struct { + BasePath string + Transform TransformFunction + CacheSizeMax uint64 // bytes + PathPerm os.FileMode + FilePerm os.FileMode + // If TempDir is set, it will enable filesystem atomic writes by + // writing temporary files to that location before being moved + // to BasePath. + // Note that TempDir MUST be on the same device/partition as + // BasePath. + TempDir string + + Index Index + IndexLess LessFunction + + Compression Compression +} + +// Diskv implements the Diskv interface. You shouldn't construct Diskv +// structures directly; instead, use the New constructor. +type Diskv struct { + Options + mu sync.RWMutex + cache map[string][]byte + cacheSize uint64 +} + +// New returns an initialized Diskv structure, ready to use. +// If the path identified by baseDir already contains data, +// it will be accessible, but not yet cached. +func New(o Options) *Diskv { + if o.BasePath == "" { + o.BasePath = defaultBasePath + } + if o.Transform == nil { + o.Transform = defaultTransform + } + if o.PathPerm == 0 { + o.PathPerm = defaultPathPerm + } + if o.FilePerm == 0 { + o.FilePerm = defaultFilePerm + } + + d := &Diskv{ + Options: o, + cache: map[string][]byte{}, + cacheSize: 0, + } + + if d.Index != nil && d.IndexLess != nil { + d.Index.Initialize(d.IndexLess, d.Keys(nil)) + } + + return d +} + +// Write synchronously writes the key-value pair to disk, making it immediately +// available for reads. Write relies on the filesystem to perform an eventual +// sync to physical media. If you need stronger guarantees, see WriteStream. +func (d *Diskv) Write(key string, val []byte) error { + return d.WriteStream(key, bytes.NewBuffer(val), false) +} + +// WriteStream writes the data represented by the io.Reader to the disk, under +// the provided key. If sync is true, WriteStream performs an explicit sync on +// the file as soon as it's written. +// +// bytes.Buffer provides io.Reader semantics for basic data types. +func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { + if len(key) <= 0 { + return errEmptyKey + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.writeStreamWithLock(key, r, sync) +} + +// createKeyFileWithLock either creates the key file directly, or +// creates a temporary file in TempDir if it is set. +func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { + if d.TempDir != "" { + if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { + return nil, fmt.Errorf("temp mkdir: %s", err) + } + f, err := ioutil.TempFile(d.TempDir, "") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + + if err := f.Chmod(d.FilePerm); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return nil, fmt.Errorf("chmod: %s", err) + } + return f, nil + } + + mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists + f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) + if err != nil { + return nil, fmt.Errorf("open file: %s", err) + } + return f, nil +} + +// writeStream does no input validation checking. 
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { + if err := d.ensurePathWithLock(key); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + f, err := d.createKeyFileWithLock(key) + if err != nil { + return fmt.Errorf("create key file: %s", err) + } + + wc := io.WriteCloser(&nopWriteCloser{f}) + if d.Compression != nil { + wc, err = d.Compression.Writer(f) + if err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression writer: %s", err) + } + } + + if _, err := io.Copy(wc, r); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("i/o copy: %s", err) + } + + if err := wc.Close(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression close: %s", err) + } + + if sync { + if err := f.Sync(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("file sync: %s", err) + } + } + + if err := f.Close(); err != nil { + return fmt.Errorf("file close: %s", err) + } + + if f.Name() != d.completeFilename(key) { + if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("rename: %s", err) + } + } + + if d.Index != nil { + d.Index.Insert(key) + } + + d.bustCacheWithLock(key) // cache only on read + + return nil +} + +// Import imports the source file into diskv under the destination key. If the +// destination key already exists, it's overwritten. If move is true, the +// source file is removed after a successful import. +func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { + if dstKey == "" { + return errEmptyKey + } + + if fi, err := os.Stat(srcFilename); err != nil { + return err + } else if fi.IsDir() { + return errImportDirectory + } + + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.ensurePathWithLock(dstKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + if move { + if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { + d.bustCacheWithLock(dstKey) + return nil + } else if err != syscall.EXDEV { + // If it failed due to being on a different device, fall back to copying + return err + } + } + + f, err := os.Open(srcFilename) + if err != nil { + return err + } + defer f.Close() + err = d.writeStreamWithLock(dstKey, f, false) + if err == nil && move { + err = os.Remove(srcFilename) + } + return err +} + +// Read reads the key and returns the value. +// If the key is available in the cache, Read won't touch the disk. +// If the key is not in the cache, Read will have the side-effect of +// lazily caching the value. +func (d *Diskv) Read(key string) ([]byte, error) { + rc, err := d.ReadStream(key, false) + if err != nil { + return []byte{}, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// ReadStream reads the key and returns the value (data) as an io.ReadCloser. +// If the value is cached from a previous read, and direct is false, +// ReadStream will use the cached value. Otherwise, it will return a handle to +// the file on disk, and cache the data on read. +// +// If direct is true, ReadStream will lazily delete any cached value for the +// key, and return a direct handle to the file on disk. 
+// +// If compression is enabled, ReadStream taps into the io.Reader stream prior +// to decompression, and caches the compressed data. +func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if val, ok := d.cache[key]; ok { + if !direct { + buf := bytes.NewBuffer(val) + if d.Compression != nil { + return d.Compression.Reader(buf) + } + return ioutil.NopCloser(buf), nil + } + + go func() { + d.mu.Lock() + defer d.mu.Unlock() + d.uncacheWithLock(key, uint64(len(val))) + }() + } + + return d.readWithRLock(key) +} + +// read ignores the cache, and returns an io.ReadCloser representing the +// decompressed data for the given key, streamed from the disk. Clients should +// acquire a read lock on the Diskv and check the cache themselves before +// calling read. +func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { + filename := d.completeFilename(key) + + fi, err := os.Stat(filename) + if err != nil { + return nil, err + } + if fi.IsDir() { + return nil, os.ErrNotExist + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + + var r io.Reader + if d.CacheSizeMax > 0 { + r = newSiphon(f, d, key) + } else { + r = &closingReader{f} + } + + var rc = io.ReadCloser(ioutil.NopCloser(r)) + if d.Compression != nil { + rc, err = d.Compression.Reader(r) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +// closingReader provides a Reader that automatically closes the +// embedded ReadCloser when it reaches EOF +type closingReader struct { + rc io.ReadCloser +} + +func (cr closingReader) Read(p []byte) (int, error) { + n, err := cr.rc.Read(p) + if err == io.EOF { + if closeErr := cr.rc.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + } + return n, err +} + +// siphon is like a TeeReader: it copies all data read through it to an +// internal buffer, and moves that buffer to the cache at EOF. +type siphon struct { + f *os.File + d *Diskv + key string + buf *bytes.Buffer +} + +// newSiphon constructs a siphoning reader that represents the passed file. +// When a successful series of reads ends in an EOF, the siphon will write +// the buffered data to Diskv's cache under the given key. +func newSiphon(f *os.File, d *Diskv, key string) io.Reader { + return &siphon{ + f: f, + d: d, + key: key, + buf: &bytes.Buffer{}, + } +} + +// Read implements the io.Reader interface for siphon. +func (s *siphon) Read(p []byte) (int, error) { + n, err := s.f.Read(p) + + if err == nil { + return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed + } + + if err == io.EOF { + s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail + if closeErr := s.f.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + return n, err + } + + return n, err +} + +// Erase synchronously erases the given key from the disk and the cache. +func (d *Diskv) Erase(key string) error { + d.mu.Lock() + defer d.mu.Unlock() + + d.bustCacheWithLock(key) + + // erase from index + if d.Index != nil { + d.Index.Delete(key) + } + + // erase from disk + filename := d.completeFilename(key) + if s, err := os.Stat(filename); err == nil { + if s.IsDir() { + return errBadKey + } + if err = os.Remove(filename); err != nil { + return err + } + } else { + // Return err as-is so caller can do os.IsNotExist(err). 
+ return err + } + + // clean up and return + d.pruneDirsWithLock(key) + return nil +} + +// EraseAll will delete all of the data from the store, both in the cache and on +// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- +// diskv-related data. Care should be taken to always specify a diskv base +// directory that is exclusively for diskv data. +func (d *Diskv) EraseAll() error { + d.mu.Lock() + defer d.mu.Unlock() + d.cache = make(map[string][]byte) + d.cacheSize = 0 + if d.TempDir != "" { + os.RemoveAll(d.TempDir) // errors ignored + } + return os.RemoveAll(d.BasePath) +} + +// Has returns true if the given key exists. +func (d *Diskv) Has(key string) bool { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.cache[key]; ok { + return true + } + + filename := d.completeFilename(key) + s, err := os.Stat(filename) + if err != nil { + return false + } + if s.IsDir() { + return false + } + + return true +} + +// Keys returns a channel that will yield every key accessible by the store, +// in undefined order. If a cancel channel is provided, closing it will +// terminate and close the keys channel. +func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { + return d.KeysPrefix("", cancel) +} + +// KeysPrefix returns a channel that will yield every key accessible by the +// store with the given prefix, in undefined order. If a cancel channel is +// provided, closing it will terminate and close the keys channel. If the +// provided prefix is the empty string, all keys will be yielded. +func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { + var prepath string + if prefix == "" { + prepath = d.BasePath + } else { + prepath = d.pathFor(prefix) + } + c := make(chan string) + go func() { + filepath.Walk(prepath, walker(c, prefix, cancel)) + close(c) + }() + return c +} + +// walker returns a function which satisfies the filepath.WalkFunc interface. +// It sends every non-directory file entry down the channel c. +func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { + return nil // "pass" + } + + select { + case c <- info.Name(): + case <-cancel: + return errCanceled + } + + return nil + } +} + +// pathFor returns the absolute path for location on the filesystem where the +// data for the given key will be stored. +func (d *Diskv) pathFor(key string) string { + return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) +} + +// ensurePathWithLock is a helper function that generates all necessary +// directories on the filesystem for the given key. +func (d *Diskv) ensurePathWithLock(key string) error { + return os.MkdirAll(d.pathFor(key), d.PathPerm) +} + +// completeFilename returns the absolute path to the file for the given key. +func (d *Diskv) completeFilename(key string) string { + return filepath.Join(d.pathFor(key), key) +} + +// cacheWithLock attempts to cache the given key-value pair in the store's +// cache. It can fail if the value is larger than the cache's maximum size. 
+func (d *Diskv) cacheWithLock(key string, val []byte) error { + valueSize := uint64(len(val)) + if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { + return fmt.Errorf("%s; not caching", err) + } + + // be very strict about memory guarantees + if (d.cacheSize + valueSize) > d.CacheSizeMax { + panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) + } + + d.cache[key] = val + d.cacheSize += valueSize + return nil +} + +// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. +func (d *Diskv) cacheWithoutLock(key string, val []byte) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.cacheWithLock(key, val) +} + +func (d *Diskv) bustCacheWithLock(key string) { + if val, ok := d.cache[key]; ok { + d.uncacheWithLock(key, uint64(len(val))) + } +} + +func (d *Diskv) uncacheWithLock(key string, sz uint64) { + d.cacheSize -= sz + delete(d.cache, key) +} + +// pruneDirsWithLock deletes empty directories in the path walk leading to the +// key k. Typically this function is called after an Erase is made. +func (d *Diskv) pruneDirsWithLock(key string) error { + pathlist := d.Transform(key) + for i := range pathlist { + dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) + + // thanks to Steven Blenkinsop for this snippet + switch fi, err := os.Stat(dir); true { + case err != nil: + return err + case !fi.IsDir(): + panic(fmt.Sprintf("corrupt dirstate at %s", dir)) + } + + nlinks, err := filepath.Glob(filepath.Join(dir, "*")) + if err != nil { + return err + } else if len(nlinks) > 0 { + return nil // has subdirs -- do not prune + } + if err = os.Remove(dir); err != nil { + return err + } + } + + return nil +} + +// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order +// until the cache has at least valueSize bytes available. +func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { + if valueSize > d.CacheSizeMax { + return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) + } + + safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } + + for key, val := range d.cache { + if safe() { + break + } + + d.uncacheWithLock(key, uint64(len(val))) + } + + if !safe() { + panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) + } + + return nil +} + +// nopWriteCloser wraps an io.Writer and provides a no-op Close method to +// satisfy the io.WriteCloser interface. +type nopWriteCloser struct { + io.Writer +} + +func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go new file mode 100644 index 00000000..96fee515 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index.go @@ -0,0 +1,115 @@ +package diskv + +import ( + "sync" + + "github.com/google/btree" +) + +// Index is a generic interface for things that can +// provide an ordered list of keys. +type Index interface { + Initialize(less LessFunction, keys <-chan string) + Insert(key string) + Delete(key string) + Keys(from string, n int) []string +} + +// LessFunction is used to initialize an Index of keys in a specific order. +type LessFunction func(string, string) bool + +// btreeString is a custom data type that satisfies the BTree Less interface, +// making the strings it wraps sortable by the BTree package. 
+type btreeString struct { + s string + l LessFunction +} + +// Less satisfies the BTree.Less interface using the btreeString's LessFunction. +func (s btreeString) Less(i btree.Item) bool { + return s.l(s.s, i.(btreeString).s) +} + +// BTreeIndex is an implementation of the Index interface using google/btree. +type BTreeIndex struct { + sync.RWMutex + LessFunction + *btree.BTree +} + +// Initialize populates the BTree tree with data from the keys channel, +// according to the passed less function. It's destructive to the BTreeIndex. +func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { + i.Lock() + defer i.Unlock() + i.LessFunction = less + i.BTree = rebuild(less, keys) +} + +// Insert inserts the given key (only) into the BTree tree. +func (i *BTreeIndex) Insert(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) +} + +// Delete removes the given key (only) from the BTree tree. +func (i *BTreeIndex) Delete(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) +} + +// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, +// Keys will return the first n keys. If the passed 'from' key is non-empty, the +// first key in the returned slice will be the key that immediately follows the +// passed key, in key order. +func (i *BTreeIndex) Keys(from string, n int) []string { + i.RLock() + defer i.RUnlock() + + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + + if i.BTree.Len() <= 0 { + return []string{} + } + + btreeFrom := btreeString{s: from, l: i.LessFunction} + skipFirst := true + if len(from) <= 0 || !i.BTree.Has(btreeFrom) { + // no such key, so fabricate an always-smallest item + btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} + skipFirst = false + } + + keys := []string{} + iterator := func(i btree.Item) bool { + keys = append(keys, i.(btreeString).s) + return len(keys) < n + } + i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) + + if skipFirst && len(keys) > 0 { + keys = keys[1:] + } + + return keys +} + +// rebuildIndex does the work of regenerating the index +// with the given keys. +func rebuild(less LessFunction, keys <-chan string) *btree.BTree { + tree := btree.New(2) + for key := range keys { + tree.ReplaceOrInsert(btreeString{s: key, l: less}) + } + return tree +} diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore new file mode 100644 index 00000000..f6fc2e8e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*~ +*# +.build diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml new file mode 100644 index 00000000..85b51152 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go + +go: + - 1.6.3 + - 1.7 + - 1.8.1 + +script: + - go test -short ./... 
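Taken together, the `httpcache` and `diskv` packages vendored above compose into a disk-backed caching HTTP client. A hedged sketch using only the constructors that appear in this diff (`diskcache.New`, `httpcache.NewTransport`, `(*Transport).Client`); the cache directory is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// diskcache stores responses via diskv under MD5-hashed filenames.
	cache := diskcache.New("/tmp/httpcache-demo")

	// NewTransport enables MarkCachedResponses, so cache hits carry the
	// X-From-Cache header defined in httpcache.go.
	client := httpcache.NewTransport(cache).Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		// Empty on the first request; "1" if the second request is
		// served fresh from the cache.
		fmt.Println("X-From-Cache:", resp.Header.Get(httpcache.XFromCache))
	}
}
```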
diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md new file mode 100644 index 00000000..330788a4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. This might break + existing setups that used to export inconsistent metrics. +* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. +* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. 
+* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histogram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow tests as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md new file mode 100644 index 00000000..40503edb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
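The 0.8.0 entries above introduce the machinery the vendored sources below keep referring to: custom registries behind the `Registerer` and `Gatherer` interfaces, HTTP exposition moved into package `promhttp`, and a variadic `MustRegister`. A minimal sketch of how those pieces fit together against client_golang 0.8.0 (the metric names are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of the global default registry.
	reg := prometheus.NewRegistry()

	opsTotal := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_ops_total",
		Help: "Total operations handled.",
	})
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_in_flight_requests",
		Help: "Current number of in-flight requests.",
	})

	// MustRegister is variadic as of 0.8.0: both collectors in one call.
	reg.MustRegister(opsTotal, inFlight)

	opsTotal.Inc()

	// promhttp exposes any Gatherer; a Registry is one.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Because HandlerFor accepts any Gatherer, the same wiring also serves a NewPedanticRegistry in tests, one of the custom-registry uses the doc.go changes below spell out.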
diff --git a/vendor/github.com/prometheus/client_golang/MAINTAINERS.md b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md new file mode 100644 index 00000000..3ede55fe --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md @@ -0,0 +1 @@ +* Björn Rabenstein diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md new file mode 100644 index 00000000..479290d2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -0,0 +1,47 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. + +## Where is `model`, `extraction`, and `text`? + +The `model` package has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage.
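The README above covers the instrumentation side; the counter.go hunk just below removes the deprecated Counter.Set in favor of constant metrics created on the fly, the same pattern the vendored go_collector.go and process_collector.go switch to further down in this diff. A sketch of that replacement, where `readExternalCount` is a hypothetical stand-in for however the externally maintained value is obtained:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// externalStatsCollector mirrors a counter maintained outside this process,
// the use case Counter.Set used to serve.
type externalStatsCollector struct {
	opsDesc *prometheus.Desc
}

func newExternalStatsCollector() *externalStatsCollector {
	return &externalStatsCollector{
		opsDesc: prometheus.NewDesc(
			"external_ops_total", // illustrative metric name
			"Operations counted by an external system.",
			nil, nil,
		),
	}
}

// Describe sends the static descriptor, as the Collector contract requires.
func (c *externalStatsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.opsDesc
}

// Collect creates a throw-away constant metric on every scrape, so the
// exposed value is always the external system's current count.
func (c *externalStatsCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.opsDesc, prometheus.CounterValue, readExternalCount())
}

// readExternalCount is hypothetical; replace with the real lookup.
func readExternalCount() float64 { return 42 }

func main() {
	prometheus.MustRegister(newExternalStatsCollector())
}
```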
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION new file mode 100644 index 00000000..a3df0a69 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index ee37949a..72d5256a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -30,16 +30,8 @@ type Counter interface { Metric Collector - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - // - // Deprecated: Use NewConstMetric to create a counter for an external - // value. A Counter should never be set. - Set(float64) - // Inc increments the counter by 1. + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 77f4b30e..1835b16f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,20 +16,15 @@ package prometheus import ( "errors" "fmt" - "regexp" "sort" "strings" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" @@ -78,7 +73,7 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occured during construction. It is reported on + // err is an error that occurred during construction. It is reported on // registration time. err error } @@ -103,7 +98,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("empty help string") return d } - if !metricNameRE.MatchString(fqName) { + if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -200,6 +195,6 @@ func (d *Desc) String() string { } func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index b15a2d3b..278969dc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -17,7 +17,7 @@ // Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. 
// // A Basic Example // @@ -26,6 +26,7 @@ // package main // // import ( +// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" @@ -59,7 +60,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":8080", nil) +// log.Fatal(http.ListenAndServe(":8080", nil)) // } // // @@ -69,7 +70,7 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their // vector versions for basic usage. // @@ -95,8 +96,8 @@ // SummaryVec, HistogramVec, and UntypedVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. // // Custom Collectors and constant Metrics // @@ -114,8 +115,8 @@ // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // NewConstSummary (and their respective Must… versions). That will happen in // the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,32 +130,32 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. 
That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it // can be found in the global DefaultRegistry variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. // -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegistry. You can use multiple registries at the +// same time to expose different metrics in different ways. You can use separate +// registries for testing purposes. // // Also note that the DefaultRegistry comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via @@ -166,16 +167,20 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 8b70e514..9ab5a3d6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -27,16 +27,21 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. Inc() - // Dec decrements the Gauge by 1. + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) 
+ // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index abc9d4ec..f9676455 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -8,8 +8,9 @@ import ( ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc // metrics to describe and collect metrics memStatsMetrics @@ -19,11 +20,14 @@ type goCollector struct { // go process. func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the GC invocation durations.", @@ -48,7 +52,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. Sum of all system allocations.", + "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -111,12 +115,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, + valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -213,6 +217,14 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, }, }, } @@ -224,9 +236,9 @@ func memstatNamespace(s string) string { // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - for _, i := range c.metrics { ch <- i.desc } @@ -234,8 +246,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. 
func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 9719e8fa..f46eff6a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -308,23 +308,23 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { } // GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { +// MetricVec. The difference is that this method returns an Observer and not a +// Metric so that no type conversion to an Observer is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } // GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { +// difference is that this method returns an Observer and not a Metric so that no +// type conversion to an Observer is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } @@ -333,15 +333,15 @@ func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { // GetMetricWithLabelValues would have returned an error. By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) +func (m *HistogramVec) WithLabelValues(lvs ...string) Observer { + return m.MetricVec.WithLabelValues(lvs...).(Observer) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) +func (m *HistogramVec) With(labels Labels) Observer { + return m.MetricVec.With(labels).(Observer) } type constHistogram struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 67ee5ac7..d485ce0b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -62,7 +62,8 @@ func giveBuf(buf *bytes.Buffer) { // // Deprecated: Please note the issues described in the doc comment of // InstrumentHandler. 
You might want to consider using promhttp.Handler instead -// (which is non instrumented). +// (which is not instrumented, but can be instrumented with the tooling provided +// in package promhttp). func Handler() http.Handler { return InstrumentHandler("prometheus", UninstrumentedHandler()) } @@ -158,7 +159,8 @@ func nowSeries(t ...time.Time) nower { // value. http_requests_total is a metric vector partitioned by HTTP method // (label name "method") and HTTP status code (label name "code"). // -// Deprecated: InstrumentHandler has several issues: +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: // // - It uses Summaries rather than Histograms. Summaries are not useful if // aggregation across multiple instances is required. @@ -172,9 +174,8 @@ func nowSeries(t ...time.Time) nower { // httputil.ReverseProxy is a prominent example for a handler // performing such writes. // -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Please prefer direct -// instrumentation in the meantime. +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) } @@ -184,12 +185,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun // issues). // // Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. +// InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { return InstrumentHandlerFuncWithOpts( SummaryOpts{ Subsystem: "http", ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, handlerFunc, ) @@ -222,7 +224,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri // SummaryOpts. // // Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. +// InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) } @@ -233,7 +235,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand // SummaryOpts are used. // // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { reqCnt := NewCounterVec( CounterOpts{ @@ -245,34 +247,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo }, instLabels, ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } opts.Name = "request_duration_microseconds" opts.Help = "The HTTP request latencies in microseconds." 
reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } opts.Name = "request_size_bytes" opts.Help = "The HTTP request sizes in bytes." reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } opts.Name = "response_size_bytes" opts.Help = "The HTTP response sizes in bytes." resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) + out := computeApproximateRequestSize(r) _, cn := w.(http.CloseNotifier) _, fl := w.(http.Flusher) @@ -290,30 +310,44 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo method := sanitizeMethod(r.Method) code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) }) } -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } } - } - s += len(r.Host) + s += len(r.Host) - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out } type responseWriterDelegator struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 00000000..b0520e85 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index e31e62e7..94b2553e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -19,10 +19,10 @@ type processCollector struct { pid int collectFn func(chan<- Metric) pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc } // NewProcessCollector returns a collector which exports the current state of @@ -44,40 +44,45 @@ func NewProcessCollectorPIDFn( pidFn func() (int, error), namespace string, ) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + c := processCollector{ pidFn: pidFn, collectFn: func(chan<- Metric) {}, - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), + cpuTotal: NewDesc( + 
ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), } // Set up process metric collection if supported by the runtime. @@ -90,12 +95,12 @@ func NewProcessCollectorPIDFn( // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime } // Collect returns the current state of all metrics of the collector. @@ -117,26 +122,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) } } if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) } if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 32a3986b..8c6b5bd8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -152,38 +152,6 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } -// RegisterOrGet registers the provided Collector with the DefaultRegisterer and -// returns the Collector, unless an equal Collector was registered before, in -// which case that Collector is returned. -// -// Deprecated: RegisterOrGet is merely a convenience function for the -// implementation as described in the documentation for -// AlreadyRegisteredError. As the use case is relatively rare, this function -// will be removed in a future version of this package to clean up the -// namespace. -func RegisterOrGet(c Collector) (Collector, error) { - if err := Register(c); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - return are.ExistingCollector, nil - } - return nil, err - } - return c, nil -} - -// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning -// an error. 
-// -// Deprecated: This is deprecated for the same reason RegisterOrGet is. See -// there for details. -func MustRegisterOrGet(c Collector) Collector { - c, err := RegisterOrGet(c) - if err != nil { - panic(err) - } - return c -} - // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -201,25 +169,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } -// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that -// gathers from the previous DefaultGatherers but then merges the MetricFamily -// protobufs returned from the provided hook function with the MetricFamily -// protobufs returned from the original DefaultGatherer. -// -// Deprecated: This function manipulates the DefaultGatherer variable. Consider -// the implications, i.e. don't do this concurrently with any uses of the -// DefaultGatherer. In the rare cases where you need to inject MetricFamily -// protobufs directly, it is recommended to use a custom Registry and combine it -// with a custom Gatherer using the Gatherers type (see -// there). SetMetricFamilyInjectionHook only exists for compatibility reasons -// with previous versions of this package. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - DefaultGatherer = Gatherers{ - DefaultGatherer, - GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), - } -} - // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -294,7 +243,7 @@ func (r *Registry) Register(c Collector) error { }() r.mtx.Lock() defer r.mtx.Unlock() - // Coduct various tests... + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? @@ -447,7 +396,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // Drain metricChan in case of premature return. defer func() { - for _ = range metricChan { + for range metricChan { } }() @@ -683,7 +632,7 @@ func (s metricSorter) Less(i, j int) bool { return s[i].GetTimestampMs() < s[j].GetTimestampMs() } -// normalizeMetricFamilies returns a MetricFamily slice whith empty +// normalizeMetricFamilies returns a MetricFamily slice with empty // MetricFamilies pruned and the remaining MetricFamilies sorted by name within // the slice, with the contained Metrics sorted within each MetricFamily. func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index bce05bf9..1c65e25e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -54,6 +54,9 @@ type Summary interface { } // DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. var ( DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} @@ -113,9 +116,15 @@ type SummaryOpts struct { ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. 
- // The default value is DefObjectives. + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -183,7 +192,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if len(opts.Objectives) == 0 { + if opts.Objectives == nil { opts.Objectives = DefObjectives } @@ -410,24 +419,24 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { +// GetMetricWithLabelValues replaces the method of the same name in MetricVec. +// The difference is that this method returns an Observer and not a Metric so +// that no type conversion to an Observer is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } // GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { +// difference is that this method returns an Observer and not a Metric so that +// no type conversion to an Observer is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } @@ -436,15 +445,15 @@ func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { // GetMetricWithLabelValues would have returned an error. By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) +func (m *SummaryVec) WithLabelValues(lvs ...string) Observer { + return m.MetricVec.WithLabelValues(lvs...).(Observer) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. 
By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) +func (m *SummaryVec) With(labels Labels) Observer { + return m.MetricVec.With(labels).(Observer) } type constSummary struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 00000000..12b65699 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 5faf7e6e..065501d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -20,6 +20,11 @@ package prometheus // no type information is implied. // // To create Untyped instances, use NewUntyped. +// +// Deprecated: The Untyped type is deprecated because it doesn't make sense in +// direct instrumentation. If you need to mirror an external metric of unknown +// type (usually while writing exporters), Use MustNewConstMetric to create an +// untyped metric instance on the fly. type Untyped interface { Metric Collector diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index a944c377..ff75ce58 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,6 +19,7 @@ import ( "math" "sort" "sync/atomic" + "time" dto "github.com/prometheus/client_model/go" @@ -43,7 +44,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") // ValueType. 
This is a low-level building block used by the library to back the // implementations of Counter, Gauge, and Untyped. type value struct { - // valBits containst the bits of the represented float64 value. It has + // valBits contains the bits of the represented float64 value. It has // to go first in the struct to guarantee alignment for atomic // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 @@ -80,6 +81,10 @@ func (v *value) Set(val float64) { atomic.StoreUint64(&v.valBits, math.Float64bits(val)) } +func (v *value) SetToCurrentTime() { + v.Set(float64(time.Now().UnixNano()) / 1e9) +} + func (v *value) Inc() { v.Add(1) } diff --git a/vendor/github.com/prometheus/client_model/.gitignore b/vendor/github.com/prometheus/client_model/.gitignore new file mode 100644 index 00000000..2f7896d1 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/.gitignore @@ -0,0 +1 @@ +target/ diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md similarity index 70% rename from vendor/github.com/prometheus/client_golang/AUTHORS.md rename to vendor/github.com/prometheus/client_model/AUTHORS.md index c5275d5a..e8b3efa6 100644 --- a/vendor/github.com/prometheus/client_golang/AUTHORS.md +++ b/vendor/github.com/prometheus/client_model/AUTHORS.md @@ -8,11 +8,6 @@ Maintainers of this repository: The following individuals have contributed code to this repository (listed in alphabetical order): -* Bernerd Schaefer * Björn Rabenstein -* Daniel Bornkessel -* Jeff Younker -* Julius Volz * Matt T. Proud * Tobias Schmidt - diff --git a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md new file mode 100644 index 00000000..573d5874 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines for the Go parts are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/client_model/Makefile b/vendor/github.com/prometheus/client_model/Makefile new file mode 100644 index 00000000..9cc23b34 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/Makefile @@ -0,0 +1,61 @@ +# Copyright 2013 Prometheus Team +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +KEY_ID ?= _DEFINE_ME_ + +all: cpp go java python ruby + +SUFFIXES: + +cpp: cpp/metrics.pb.cc cpp/metrics.pb.h + +cpp/metrics.pb.cc: metrics.proto + protoc $< --cpp_out=cpp/ + +cpp/metrics.pb.h: metrics.proto + protoc $< --cpp_out=cpp/ + +go: go/metrics.pb.go + +go/metrics.pb.go: metrics.proto + protoc $< --go_out=go/ + +java: src/main/java/io/prometheus/client/Metrics.java pom.xml + mvn clean compile package + +src/main/java/io/prometheus/client/Metrics.java: metrics.proto + protoc $< --java_out=src/main/java + +python: python/prometheus/client/model/metrics_pb2.py + +python/prometheus/client/model/metrics_pb2.py: metrics.proto + protoc $< --python_out=python/prometheus/client/model + +ruby: + $(MAKE) -C ruby build + +clean: + -rm -rf cpp/* + -rm -rf go/* + -rm -rf java/* + -rm -rf python/* + -$(MAKE) -C ruby clean + -mvn clean + +maven-deploy-snapshot: java + mvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +maven-deploy-release: java + mvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md new file mode 100644 index 00000000..a710042d --- /dev/null +++ b/vendor/github.com/prometheus/client_model/README.md @@ -0,0 +1,26 @@ +# Background +Under most circumstances, manually downloading this repository should never +be required. + +# Prerequisites +# Base +* [Google Protocol Buffers](https://developers.google.com/protocol-buffers) + +## Java +* [Apache Maven](http://maven.apache.org) +* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository + +## Go +* [Go](http://golang.org) +* [goprotobuf](https://code.google.com/p/goprotobuf) + +## Ruby +* [Ruby](https://www.ruby-lang.org) +* [bundler](https://rubygems.org/gems/bundler) + +# Building + $ make + +# Getting Started + * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go). + * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 9805432c..b065f868 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,23 +1,34 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-go. // source: metrics.proto +// DO NOT EDIT! -package io_prometheus_client // import "github.com/prometheus/client_model/go" +/* +Package io_prometheus_client is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client import proto "github.com/golang/protobuf/proto" -import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal -var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type MetricType int32 const ( @@ -59,41 +70,16 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo func (m *LabelPair) GetName() string { if m != nil && m.Name != nil { @@ -110,35 +96,13 @@ func (m *LabelPair) GetValue() string { } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} -} -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *Gauge) GetValue() float64 { if m != nil && m.Value != nil { @@ -148,35 +112,13 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} -} -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *Counter) GetValue() float64 { if m != nil && m.Value != nil { @@ -186,36 +128,14 @@ func (m *Counter) GetValue() float64 { } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} -} -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo func (m *Quantile) GetQuantile() float64 { if m != nil && m.Quantile != nil { @@ -232,37 +152,15 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} -} -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo func (m *Summary) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -286,35 +184,13 @@ func (m *Summary) GetQuantile() []*Quantile { } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} -} -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo func (m *Untyped) GetValue() float64 { if m != nil && m.Value != nil { @@ -324,37 +200,15 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (dst *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(dst, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m 
*Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo func (m *Histogram) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -378,36 +232,14 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} -} -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo func (m *Bucket) GetCumulativeCount() uint64 { if m != nil && m.CumulativeCount != nil { @@ -424,41 +256,19 @@ func (m *Bucket) GetUpperBound() float64 { } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - 
return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *Metric) GetLabel() []*LabelPair { if m != nil { @@ -510,38 +320,16 @@ func (m *Metric) GetTimestampMs() int64 { } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} -} -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo func (m *MetricFamily) GetName() string { if m != nil && m.Name != nil { @@ -572,58 +360,5 @@ func (m *MetricFamily) GetMetric() []*Metric { } func init() { - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } - -var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/client_model/metrics.proto b/vendor/github.com/prometheus/client_model/metrics.proto new file mode 100644 index 00000000..0b84af92 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/metrics.proto @@ -0,0 +1,81 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package io.prometheus.client; +option java_package = "io.prometheus.client"; + +message LabelPair { + optional string name = 1; + optional string value = 2; +} + +enum MetricType { + COUNTER = 0; + GAUGE = 1; + SUMMARY = 2; + UNTYPED = 3; + HISTOGRAM = 4; +} + +message Gauge { + optional double value = 1; +} + +message Counter { + optional double value = 1; +} + +message Quantile { + optional double quantile = 1; + optional double value = 2; +} + +message Summary { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Quantile quantile = 3; +} + +message Untyped { + optional double value = 1; +} + +message Histogram { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. +} + +message Bucket { + optional uint64 cumulative_count = 1; // Cumulative in increasing order. + optional double upper_bound = 2; // Inclusive. +} + +message Metric { + repeated LabelPair label = 1; + optional Gauge gauge = 2; + optional Counter counter = 3; + optional Summary summary = 4; + optional Untyped untyped = 5; + optional Histogram histogram = 7; + optional int64 timestamp_ms = 6; +} + +message MetricFamily { + optional string name = 1; + optional string help = 2; + optional MetricType type = 3; + repeated Metric metric = 4; +} diff --git a/vendor/github.com/prometheus/client_model/pom.xml b/vendor/github.com/prometheus/client_model/pom.xml new file mode 100644 index 00000000..4d34c901 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/pom.xml @@ -0,0 +1,130 @@ + + + 4.0.0 + + io.prometheus.client + model + 0.0.3-SNAPSHOT + + + org.sonatype.oss + oss-parent + 7 + + + Prometheus Client Data Model + http://github.com/prometheus/client_model + + Prometheus Client Data Model: Generated Protocol Buffer Assets + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + scm:git:git@github.com:prometheus/client_model.git + scm:git:git@github.com:prometheus/client_model.git + git@github.com:prometheus/client_model.git + + + + + mtp + Matt T. 
Proud + matt.proud@gmail.com + + + + + + com.google.protobuf + protobuf-java + 2.5.0 + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.8 + + UTF-8 + UTF-8 + true + + + + generate-javadoc-site-report + site + + javadoc + + + + attach-javadocs + + jar + + + + + + maven-compiler-plugin + + 1.6 + 1.6 + + 3.1 + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar + + + + + + + + + release-sign-artifacts + + + performRelease + true + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.4 + + + sign-artifacts + verify + + sign + + + + + + + + + diff --git a/vendor/github.com/prometheus/client_model/setup.py b/vendor/github.com/prometheus/client_model/setup.py new file mode 100644 index 00000000..67b9f20e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/setup.py @@ -0,0 +1,23 @@ +#!/usr/bin/python + +from setuptools import setup + +setup( + name = 'prometheus_client_model', + version = '0.0.1', + author = 'Matt T. Proud', + author_email = 'matt.proud@gmail.com', + description = 'Data model artifacts for the Prometheus client.', + license = 'Apache License 2.0', + url = 'http://github.com/prometheus/client_model', + packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'], + package_dir = {'': 'python'}, + requires = ['protobuf(==2.4.1)'], + platforms = 'Platform Independent', + classifiers = ['Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Topic :: Software Development :: Testing', + 'Topic :: System :: Monitoring']) diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml new file mode 100644 index 00000000..2fe8e9ad --- /dev/null +++ b/vendor/github.com/prometheus/common/.travis.yml @@ -0,0 +1,6 @@ +sudo: false + +language: go +go: + - 1.7.5 + - tip diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md new file mode 100644 index 00000000..40503edb --- /dev/null +++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
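The metrics.proto schema added above and the regenerated go/metrics.pb.go bindings describe the same ten messages. A minimal sketch of how the generated io_prometheus_client types are consumed, assuming the conventional dto import alias and an invented http_requests_total metric (neither is part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	dto "github.com/prometheus/client_model/go"
    )

    func main() {
    	// proto2 semantics: every scalar field is a pointer so that an unset
    	// field is distinguishable from a zero value, hence the proto.String/
    	// proto.Float64 helpers and the Enum() accessor.
    	mf := &dto.MetricFamily{
    		Name: proto.String("http_requests_total"), // hypothetical metric
    		Help: proto.String("Total number of HTTP requests."),
    		Type: dto.MetricType_COUNTER.Enum(),
    		Metric: []*dto.Metric{{
    			Label: []*dto.LabelPair{{
    				Name:  proto.String("code"),
    				Value: proto.String("200"),
    			}},
    			Counter: &dto.Counter{Value: proto.Float64(42)},
    		}},
    	}
    	fmt.Println(proto.CompactTextString(mf))
    }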
diff --git a/vendor/github.com/prometheus/common/MAINTAINERS.md b/vendor/github.com/prometheus/common/MAINTAINERS.md new file mode 100644 index 00000000..1b315216 --- /dev/null +++ b/vendor/github.com/prometheus/common/MAINTAINERS.md @@ -0,0 +1 @@ +* Fabian Reinartz diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 00000000..98f6ce24 --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. + +* **config**: Common configuration structures +* **expfmt**: Decoding and encoding for the exposition format +* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus) +* **model**: Shared data structures +* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **version**: Version informations and metric diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index c092723e..a7a42d5e 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurrs during sample extraction, it continues to +// families. If an error occurs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last -// error that has occurred. +// error that has occured. func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { var ( all model.Vector diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c71bcb98..371ac750 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -26,7 +26,7 @@ const ( // The Content-Type values for the different wire protocols. FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtText Format = `text/plain; version=` + TextVersion FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ec3d86ba..ef9a1507 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -315,10 +315,6 @@ func (p *TextParser) startLabelValue() stateFn { if p.readTokenAsLabelValue(); p.err != nil { return nil } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } p.currentLabelPair.Value = proto.String(p.currentToken.String()) // Special treatment of summaries: // - Quantile labels are special, will result in dto.Quantile later. 
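The text_parse.go hunk above removes the parser's guard against invalid label values. In this vendored snapshot of prometheus/common, model.LabelValue.IsValid amounts to a UTF-8 well-formedness test, so the deleted guard behaved roughly as sketched below (the helper name is hypothetical, and the equivalence is an assumption based on the deleted call):

    package main

    import (
    	"fmt"
    	"unicode/utf8"
    )

    // isValidLabelValue mirrors what model.LabelValue(...).IsValid() checked in
    // the removed guard: a label value only needs to be well-formed UTF-8,
    // unlike a label name, which is restricted to [a-zA-Z_][a-zA-Z0-9_]*.
    func isValidLabelValue(v string) bool {
    	return utf8.ValidString(v)
    }

    func main() {
    	fmt.Println(isValidLabelValue("200"))      // true
    	fmt.Println(isValidLabelValue("\xff\xfe")) // false: invalid UTF-8
    }

With the guard gone, the parser accepts any successfully unescaped byte sequence as a label value, so validation, if needed, falls to the consumer of the parsed metric families.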
@@ -359,7 +355,7 @@ func (p *TextParser) startLabelValue() stateFn { } return p.readingValue default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) return nil } } @@ -556,8 +552,8 @@ func (p *TextParser) readTokenUntilWhitespace() { // byte considered is the byte already read (now in p.currentByte). The first // newline byte encountered is still copied into p.currentByte, but not into // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. +// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All +// other escape sequences are invalid and cause an error. func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.Reset() escaped := false diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d..7538e299 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -59,8 +59,8 @@ func (m *Matcher) Validate() error { return nil } -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. +// Silence defines the representation of a silence definiton +// in the Prometheus eco-system. type Silence struct { ID uint64 `json:"id,omitempty"` diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 74ed5a9f..548968ae 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -163,21 +163,9 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") -// ParseDuration parses a string into a time.Duration, assuming that a year +// StringToDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { matches := durationRE.FindStringSubmatch(durationStr) @@ -214,9 +202,6 @@ func (d Duration) String() string { ms = int64(time.Duration(d) / time.Millisecond) unit = "ms" ) - if ms == 0 { - return "0s" - } factors := map[string]int64{ "y": 1000 * 60 * 60 * 24 * 365, "w": 1000 * 60 * 60 * 24 * 7, diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a..c9ed3ffd 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error { } // Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. 
func (s *SamplePair) Equal(o *SamplePair) bool { return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) } @@ -117,7 +117,7 @@ type Sample struct { } // Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. +// sematics of value equality is defined by SampleValue.Equal. func (s *Sample) Equal(o *Sample) bool { if s == o { return true diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659a..00000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml index 66a0b7cf..a9e28bf5 100644 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ b/vendor/github.com/prometheus/procfs/.travis.yml @@ -1,12 +1,5 @@ sudo: false - language: go - go: -- 1.9.x -- 1.10.x - -go_import_path: github.com/prometheus/procfs - -script: -- make style check_license vet test staticcheck + - 1.6.4 + - 1.7.4 diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 4d109839..c264a49d 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -1,77 +1,6 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) - -ifdef DEBUG - bindata_flags = -debug -endif - -STATICCHECK_IGNORE = - -all: format staticcheck build test - -style: - @echo ">> checking code style" - @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' - -check_license: - @echo ">> checking license header" - @./scripts/check_license.sh - -test: fixtures/.unpacked sysfs/fixtures/.unpacked - @echo ">> running all tests" - @$(GO) test -race $(shell $(GO) list ./... 
| grep -v /vendor/ | grep -v examples) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) - -%/.unpacked: %.ttar - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: fixtures.ttar sysfs/fixtures.ttar - -%fixtures.ttar: %/fixtures - rm -v $(dir $*)fixtures/.unpacked - ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ - -$(FIRST_GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck - -.PHONY: all style check_license format test vet staticcheck - -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. -# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/staticcheck +ci: + ! gofmt -l *.go | read nothing + go vet + go test -v ./... + go get github.com/golang/lint/golint + golint *.go diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index d3a82680..680a9842 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -62,7 +62,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for scanner.Scan() { var err error line := scanner.Text() - parts := strings.Fields(line) + parts := strings.Fields(string(line)) if len(parts) < 4 { return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 13c831ef..00000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,462 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/mountstats -Lines: 19 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 
140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/short -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/short/buddyinfo -Lines: 3 -Node 0, zone -Node 0, zone -Node 0, zone -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/sizemismatch -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/sizemismatch/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/valid -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/valid/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/mdstat -Lines: 26 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] - 4883648 blocks 
[2/2] [UU] - -md6 : active raid1 sdb2[2] sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory 
point to. -They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/.unpacked -Lines: 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index b6c6b2ce..17546756 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( @@ -18,7 +5,6 @@ import ( "os" "path" - "github.com/prometheus/procfs/nfs" "github.com/prometheus/procfs/xfs" ) @@ -58,25 +44,3 @@ func (fs FS) XFSStats() (*xfs.Stats, error) { return xfs.ParseStats(f) } - -// NFSClientRPCStats retrieves NFS client RPC statistics. -func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseClientRPCStats(f) -} - -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. -func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseServerRPCStats(f) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 1ad21c91..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package util - -import "strconv" - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e36d4a3b..696d114e 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( @@ -44,16 +31,16 @@ type IPVSStats struct { type IPVSBackendStatus struct { // The local (virtual) IP address. LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP // The local (virtual) port. LocalPort uint16 - // The remote (real) port. - RemotePort uint16 // The local firewall mark LocalMark string // The transport protocol (TCP, UDP). Proto string + // The remote (real) IP address. + RemoteAddress net.IP + // The remote (real) port. + RemotePort uint16 // The current number of active connections for this virtual/real address pair. ActiveConn uint64 // The current number of inactive connections for this virtual/real address pair. @@ -164,7 +151,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { ) for scanner.Scan() { - fields := strings.Fields(scanner.Text()) + fields := strings.Fields(string(scanner.Text())) if len(fields) == 0 { continue } diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 9dc19583..d7a248c0 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
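The util.ParseUint32s/ParseUint64s helpers removed above are the shared workhorses behind the nfs and xfs parsers touched later in this diff (xfs/parse.go regains private copies at the end). A short sketch of the call pattern, assuming code inside the procfs module itself, since the package lives under internal/ and cannot be imported from outside; the sample line is illustrative, not taken from this repository:

package procfs

import (
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// parseCounterLine shows the typical call pattern: split a stats line
// into whitespace-separated fields and parse the numeric tail.
func parseCounterLine() ([]uint64, error) {
	parts := strings.Fields("net 18628 0 18628 6")
	return util.ParseUint64s(parts[1:]) // -> [18628 0 18628 6]
}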
- package procfs import ( diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 7a8a1e09..6b2b0ba9 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs // While implementing parsing of /proc/[pid]/mountstats, this blog was used @@ -39,11 +26,8 @@ const ( statVersion10 = "1.0" statVersion11 = "1.1" - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 + fieldTransport10Len = 10 + fieldTransport11Len = 13 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -189,8 +173,6 @@ type NFSOperationStats struct { // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string // The local port used for the NFS mount. Port uint64 // Number of times the client has had to establish a connection from scratch @@ -365,7 +347,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } - tstats, err := parseNFSTransportStats(ss[1:], statVersion) + tstats, err := parseNFSTransportStats(ss[2:], statVersion) if err != nil { return nil, err } @@ -527,33 +509,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - switch statVersion { case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { + if len(ss) != fieldTransport10Len { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { + if len(ss) != fieldTransport11Len { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: @@ -561,13 +523,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. + // in a v1.0 response. // // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + ns := make([]uint64, fieldTransport11Len) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -577,18 +538,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats ns[i] = n } - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - return &NFSTransportStats{ - Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index 3f252337..00000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. 
- RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() -} - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) -} - -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - nd := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := nd.parseLine(s.Text()) - if err != nil { - return nd, err - } - - nd[line.Name] = *line - } - - return nd, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. 
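The newNetDev constructor above drives a bufio.Scanner over /proc/net/dev, skipping the two header lines before handing each row to parseLine (shown next). A short usage sketch of this now-removed API as it existed before the deletion:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev() // parses /proc/net/dev
	if err != nil {
		log.Fatal(err)
	}
	// NetDev is a map keyed by interface name.
	for name, line := range nd {
		fmt.Printf("%s: rx=%d bytes, tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
	}
}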
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(parts[1])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(parts[0]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(nd)) - for _, ifc := range nd { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf681..00000000 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. -// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. -type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. -type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. -type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. -type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. 
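Each struct in this deleted package mirrors one line of the proc file, with fields in the exact order the kernel prints the counters. Taking the ReplyCache type above and an illustrative nfsd line "rc 0 67302 32212" (hypothetical sample values, not taken from this repository), the parsed result would be:

package main

import (
	"fmt"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	// Hypothetical sample: "rc 0 67302 32212" maps positionally onto
	// ReplyCache as Hits, Misses, NoCache.
	rc := nfs.ReplyCache{Hits: 0, Misses: 67302, NoCache: 32212}
	fmt.Printf("%+v\n", rc)
}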
-type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. -type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? - Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. -type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. -type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc5..00000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, 
error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. - if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5ad..00000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a35..00000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
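ParseClientRPCStats above is a classic line-dispatch parser: each line of /proc/net/rpc/nfs is split into fields and routed on its first token ("net", "rpc", "proc2", ...), with util.ParseUint64s handling the numeric tail. Before this deletion, a caller could drive it directly with any io.Reader; a hedged sketch against that pre-removal API:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rpc calls=%d retransmissions=%d\n",
		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions)
}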
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case "proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 06bed0ef..8717e1fe 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -1,20 +1,6 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( - "bytes" "fmt" "io/ioutil" "os" @@ -127,7 +113,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil } // Comm returns the command name of a process. @@ -156,26 +142,6 @@ func (p Proc) Executable() (string, error) { return exe, err } -// Cwd returns the absolute path to the current working directory of the process. 
-func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot) -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - // FileDescriptors returns the currently open file descriptors of a process. func (p Proc) FileDescriptors() ([]uintptr, error) { names, err := p.fileDescriptors() diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0251c83b..b4e31d7b 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( @@ -60,6 +47,9 @@ func (p Proc) NewIO() (ProcIO, error) { _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + if err != nil { + return pio, err + } - return pio, err + return pio, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index f04ba6fd..2df997ce 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( @@ -26,46 +13,46 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int64 + CPUTime int // Maximum size of files that the process may create. - FileSize int64 + FileSize int // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). - DataSize int64 + DataSize int // Maximum size of the process stack in bytes. - StackSize int64 + StackSize int // Maximum size of a core file. - CoreFileSize int64 + CoreFileSize int // Limit of the process's resident set in pages. - ResidentSet int64 + ResidentSet int // Maximum number of processes that can be created for the real user ID of // the calling process. - Processes int64 + Processes int // Value one greater than the maximum file descriptor number that can be // opened by this process. 
- OpenFiles int64 + OpenFiles int // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 + LockedMemory int // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 + AddressSpace int // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int64 + FileLocks int // Limit of signals that may be queued for the real user ID of the calling // process. - PendingSignals int64 + PendingSignals int // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int64 + MsqqueueSize int // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int64 + NicePriority int // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int64 + RealtimePriority int // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int64 + RealtimeTimeout int } const ( @@ -138,13 +125,13 @@ func (p Proc) NewLimits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int64, error) { +func parseInt(s string) (int, error) { if s == limitsUnlimited { return -1, nil } - i, err := strconv.ParseInt(s, 10, 64) + i, err := strconv.ParseInt(s, 10, 32) if err != nil { return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) } - return i, nil + return int(i), nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index d06c26eb..00000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the -// process is a member. 
-func (p Proc) NewNamespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3cf2a9f1..724e271b 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 61eb6b0e..1ca217e8 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -1,81 +1,17 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package procfs import ( "bufio" "fmt" - "io" "os" "strconv" "strings" ) -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - // Stat represents kernel/system statistics. type Stat struct { // Boot time in seconds since the Epoch. 
- BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU []CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat + BootTime int64 } // NewStat returns kernel/system statistics read from /proc/stat. @@ -88,145 +24,33 @@ func NewStat() (Stat, error) { return fs.NewStat() } -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). -func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) - } - - return softIRQStat, total, nil -} - // NewStat returns an information about current kernel/system statistics. 
func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt - f, err := os.Open(fs.Path("stat")) if err != nil { return Stat{}, err } defer f.Close() - stat := Stat{} - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { continue } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } - stat.CPU[cpuID] = cpuStat - } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil } - - if err := scanner.Err(); err != nil { + if err := s.Err(); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) } - return stat, nil + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) } diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index b0171a12..00000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 
2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line) - line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line) - line = re.sub(r'(?<!\\)EOF', '', line) - line = re.sub(r'\\EOF', 'EOF', line) - sys.stdout.write(line) -PEF -) - -#------------------------------------------------------------------------------ - -function test_environment { - if [ "$(echo "a" | sed 's/a*/b/')" != "b" ]; then - echo "WARNING sed unable to handle null bytes, using Python (slow)." - if ! which python >/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C <dir>] -c -f <archive.ttar> <files...> (create archive) - $bname -t -f <archive.ttar> (list archive contents) - $bname [-C <dir>] -x -f <archive.ttar> (extract archive) - -Options: - -C <dir> (change directory) - -v (verbose) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE - -while getopts :cf:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! 
-e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). - echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. 
- touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." 
-        echo
-        usage 1
-    fi
-    if [ -e "$ttar_file" ]; then
-        rm "$ttar_file"
-    fi
-    exec > "$ttar_file"
-    echo "# Archive created by ttar $ARG_STRING"
-    _create "$@"
-}
-
-test_environment
-
-if [ -n "${CDIR:-}" ]; then
-    if [[ "$ARCHIVE" != /* ]]; then
-        # Relative path: preserve the archive's location before changing
-        # directory
-        ARCHIVE="$(pwd)/$ARCHIVE"
-    fi
-    cd "$CDIR"
-fi
-
-"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
index 8f1508f0..ffe9df50 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
 		if len(fields) != 2 {
 			return XfrmStat{}, fmt.Errorf(
-				"couldn't parse %s line %s", file.Name(), s.Text())
+				"couldn't parse %s line %s", file.Name(), s.Text())
 		}
 
 		name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
index 2bc0ef34..c8f6279f 100644
--- a/vendor/github.com/prometheus/procfs/xfs/parse.go
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -17,9 +17,8 @@ import (
 	"bufio"
 	"fmt"
 	"io"
+	"strconv"
 	"strings"
-
-	"github.com/prometheus/procfs/internal/util"
 )
 
 // ParseStats parses a Stats from an input io.Reader, using the format
@@ -69,7 +68,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
 
 		// Extended precision counters are uint64 values.
 		if label == fieldXpc {
-			us, err := util.ParseUint64s(ss[1:])
+			us, err := parseUint64s(ss[1:])
 			if err != nil {
 				return nil, err
 			}
@@ -83,7 +82,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
 		}
 
 		// All other counters are uint32 values.
-		us, err := util.ParseUint32s(ss[1:])
+		us, err := parseUint32s(ss[1:])
 		if err != nil {
 			return nil, err
 		}
@@ -328,3 +327,33 @@ func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
 		ReadBytes:  us[2],
 	}, nil
}
+
+// parseUint32s parses a slice of strings into a slice of uint32s.
+func parseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// parseUint64s parses a slice of strings into a slice of uint64s.
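+// For example, parseUint64s([]string{"1", "2"}) yields []uint64{1, 2};
+// a non-numeric element surfaces the strconv.ParseUint error unchanged.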
+func parseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/rancher/norman/.dockerignore b/vendor/github.com/rancher/norman/.dockerignore new file mode 100644 index 00000000..2e7d9023 --- /dev/null +++ b/vendor/github.com/rancher/norman/.dockerignore @@ -0,0 +1,5 @@ +./bin +./.dapper +./dist +./.trash-cache +./.idea diff --git a/vendor/github.com/rancher/norman/.drone.yml b/vendor/github.com/rancher/norman/.drone.yml new file mode 100644 index 00000000..8f5c37cd --- /dev/null +++ b/vendor/github.com/rancher/norman/.drone.yml @@ -0,0 +1,9 @@ +--- +pipeline: + build: + privileged: true + image: rancher/dapper:1.11.2 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + commands: + - dapper ci diff --git a/vendor/github.com/rancher/norman/.gitignore b/vendor/github.com/rancher/norman/.gitignore new file mode 100644 index 00000000..a10e060c --- /dev/null +++ b/vendor/github.com/rancher/norman/.gitignore @@ -0,0 +1,7 @@ +/.idea +/.dapper +/bin +/dist +*.swp +/.trash-cache +/trash.lock diff --git a/vendor/github.com/rancher/norman/Dockerfile.dapper b/vendor/github.com/rancher/norman/Dockerfile.dapper new file mode 100644 index 00000000..3cc956b8 --- /dev/null +++ b/vendor/github.com/rancher/norman/Dockerfile.dapper @@ -0,0 +1,16 @@ +FROM golang:1.11-alpine + +RUN apk -U add bash git gcc musl-dev docker +RUN go get -d golang.org/x/lint/golint && \ + git -C /go/src/golang.org/x/lint/golint checkout -b current 06c8688daad7faa9da5a0c2f163a3d14aac986ca && \ + go install golang.org/x/lint/golint && \ + rm -rf /go/src /go/pkg + +ENV DAPPER_SOURCE /go/src/github.com/rancher/norman/ +ENV DAPPER_OUTPUT ./bin ./dist +ENV DAPPER_DOCKER_SOCKET true +ENV HOME ${DAPPER_SOURCE} +WORKDIR ${DAPPER_SOURCE} + +ENTRYPOINT ["./scripts/entry"] +CMD ["ci"] diff --git a/vendor/github.com/rancher/norman/Makefile b/vendor/github.com/rancher/norman/Makefile new file mode 100644 index 00000000..78839b9b --- /dev/null +++ b/vendor/github.com/rancher/norman/Makefile @@ -0,0 +1,15 @@ +TARGETS := $(shell ls scripts) + +.dapper: + @echo Downloading dapper + @curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp + @@chmod +x .dapper.tmp + @./.dapper.tmp -v + @mv .dapper.tmp .dapper + +$(TARGETS): .dapper + ./.dapper $@ + +.DEFAULT_GOAL := ci + +.PHONY: $(TARGETS) diff --git a/vendor/github.com/rancher/norman/README.md b/vendor/github.com/rancher/norman/README.md new file mode 100644 index 00000000..2b6e8733 --- /dev/null +++ b/vendor/github.com/rancher/norman/README.md @@ -0,0 +1,78 @@ +Norman +======== + +An API framework for Building [Rancher Style APIs](https://github.com/rancher/api-spec/) backed by K8s CustomResources. 
+ +## Building + +`make` + +## Example + +Refer to `examples/` + +```go +package main + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/rancher/norman/generator" + "github.com/rancher/norman/server" + "github.com/rancher/norman/types" +) + +type Foo struct { + types.Resource + Name string `json:"name"` + Foo string `json:"foo"` + SubThing Baz `json:"subThing"` +} + +type Baz struct { + Name string `json:"name"` +} + +var ( + version = types.APIVersion{ + Version: "v1", + Group: "io.cattle.core.example", + Path: "/example/v1", + } + + Schemas = types.NewSchemas() +) + +func main() { + if _, err := Schemas.Import(&version, Foo{}); err != nil { + panic(err) + } + + server, err := server.NewAPIServer(context.Background(), os.Getenv("KUBECONFIG"), Schemas) + if err != nil { + panic(err) + } + + fmt.Println("Listening on 0.0.0.0:1234") + http.ListenAndServe("0.0.0.0:1234", server) +} +``` + + +## License +Copyright (c) 2014-2017 [Rancher Labs, Inc.](http://rancher.com) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/rancher/norman/controller/generic_controller.go b/vendor/github.com/rancher/norman/controller/generic_controller.go index 32efe1ee..d1b215fe 100644 --- a/vendor/github.com/rancher/norman/controller/generic_controller.go +++ b/vendor/github.com/rancher/norman/controller/generic_controller.go @@ -44,6 +44,7 @@ func init() { type HandlerFunc func(key string) error type GenericController interface { + SetThreadinessOverride(count int) Informer() cache.SharedIndexInformer AddHandler(name string, handler HandlerFunc) HandlerCount() int @@ -65,12 +66,13 @@ type handlerDef struct { type genericController struct { sync.Mutex - informer cache.SharedIndexInformer - handlers []handlerDef - queue workqueue.RateLimitingInterface - name string - running bool - synced bool + threadinessOverride int + informer cache.SharedIndexInformer + handlers []handlerDef + queue workqueue.RateLimitingInterface + name string + running bool + synced bool } func NewGenericController(name string, genericClient Backend) GenericController { @@ -94,6 +96,10 @@ func NewGenericController(name string, genericClient Backend) GenericController } } +func (g *genericController) SetThreadinessOverride(count int) { + g.threadinessOverride = count +} + func (g *genericController) HandlerCount() int { return len(g.handlers) } @@ -163,6 +169,9 @@ func (g *genericController) Start(ctx context.Context, threadiness int) error { } if !g.running { + if g.threadinessOverride > 0 { + threadiness = g.threadinessOverride + } go g.run(ctx, threadiness) } diff --git a/vendor/github.com/rancher/norman/event/logger.go b/vendor/github.com/rancher/norman/event/logger.go deleted file mode 100644 index 65488315..00000000 --- a/vendor/github.com/rancher/norman/event/logger.go +++ /dev/null @@ -1,39 +0,0 @@ -package event - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/record" -) - -type Logger interface { - Info(obj runtime.Object, message string) - 
Infof(obj runtime.Object, messagefmt string, args ...interface{}) - Error(obj runtime.Object, message string) - Errorf(obj runtime.Object, messagefmt string, args ...interface{}) -} - -type logger struct { - recorder record.EventRecorder -} - -func (l *logger) Info(obj runtime.Object, message string) { - //l.recorder.Event(obj, "Normal", "Message", message) -} - -func (l *logger) Infof(obj runtime.Object, messagefmt string, args ...interface{}) { - //l.recorder.Eventf(obj, "Normal", "Message", messagefmt, args...) -} - -func (l *logger) Error(obj runtime.Object, message string) { - //l.recorder.Event(obj, "Warning", "Message", message) -} - -func (l *logger) Errorf(obj runtime.Object, messagefmt string, args ...interface{}) { - //l.recorder.Eventf(obj, "Warning", "Message", messagefmt, args...) -} - -func NewLogger(recorder record.EventRecorder) Logger { - return &logger{ - recorder: recorder, - } -} diff --git a/vendor/github.com/rancher/norman/generator/generator.go b/vendor/github.com/rancher/norman/generator/generator.go index 27df2039..9e510f71 100644 --- a/vendor/github.com/rancher/norman/generator/generator.go +++ b/vendor/github.com/rancher/norman/generator/generator.go @@ -2,7 +2,6 @@ package generator import ( "fmt" - "io" "io/ioutil" "net/http" "os" @@ -504,14 +503,6 @@ func deepCopyGen(workDir, pkg string) error { }) } -type noInitGenerator struct { - generator.Generator -} - -func (n *noInitGenerator) Init(*generator.Context, io.Writer) error { - return nil -} - func isObjectOrList(t *gengotypes.Type) bool { for _, member := range t.Members { if member.Embedded && (member.Name == "ObjectMeta" || member.Name == "ListMeta") { diff --git a/vendor/github.com/rancher/norman/generator/k8s_client_template.go b/vendor/github.com/rancher/norman/generator/k8s_client_template.go index 05311829..548b35c4 100644 --- a/vendor/github.com/rancher/norman/generator/k8s_client_template.go +++ b/vendor/github.com/rancher/norman/generator/k8s_client_template.go @@ -13,6 +13,8 @@ import ( "k8s.io/client-go/rest" ) +type contextKeyType struct{} + type Interface interface { RESTClient() rest.Interface controller.Starter @@ -28,6 +30,20 @@ type Client struct { {{.ID}}Controllers map[string]{{.CodeName}}Controller{{end}} } +func Factory(ctx context.Context, config rest.Config) (context.Context, controller.Starter, error) { + c, err := NewForConfig(config) + if err != nil { + return ctx, nil, err + } + + return context.WithValue(ctx, contextKeyType{}, c), c, nil +} + +func From(ctx context.Context) Interface { + return ctx.Value(contextKeyType{}).(Interface) +} + + func NewForConfig(config rest.Config) (Interface, error) { if config.NegotiatedSerializer == nil { config.NegotiatedSerializer = dynamic.NegotiatedSerializer diff --git a/vendor/github.com/rancher/norman/lifecycle/object.go b/vendor/github.com/rancher/norman/lifecycle/object.go index 120f341f..211fac63 100644 --- a/vendor/github.com/rancher/norman/lifecycle/object.go +++ b/vendor/github.com/rancher/norman/lifecycle/object.go @@ -97,22 +97,22 @@ func (o *objectLifecycleAdapter) finalize(metadata metav1.Object, obj runtime.Ob } func (o *objectLifecycleAdapter) removeFinalizer(name string, obj runtime.Object) error { - metadata, err := meta.Accessor(obj) - if err != nil { - return err - } - - var finalizers []string - for _, finalizer := range metadata.GetFinalizers() { - if finalizer == name { - continue - } - finalizers = append(finalizers, finalizer) - } - metadata.SetFinalizers(finalizers) - for i := 0; i < 3; i++ { - _, err := 
o.objectClient.Update(metadata.GetName(), obj) + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + + var finalizers []string + for _, finalizer := range metadata.GetFinalizers() { + if finalizer == name { + continue + } + finalizers = append(finalizers, finalizer) + } + metadata.SetFinalizers(finalizers) + + _, err = o.objectClient.Update(metadata.GetName(), obj) if err == nil { return nil } @@ -121,16 +121,9 @@ func (o *objectLifecycleAdapter) removeFinalizer(name string, obj runtime.Object if err != nil { return err } - - metadata, err := meta.Accessor(obj) - if err != nil { - return err - } - - metadata.SetFinalizers(finalizers) } - return fmt.Errorf("failed to remove finalizer on %s:%s", metadata.GetNamespace(), metadata.GetName()) + return fmt.Errorf("failed to remove finalizer on %s", name) } func (o *objectLifecycleAdapter) createKey() string { diff --git a/vendor/github.com/rancher/norman/vendor.conf b/vendor/github.com/rancher/norman/vendor.conf new file mode 100644 index 00000000..dc59fb25 --- /dev/null +++ b/vendor/github.com/rancher/norman/vendor.conf @@ -0,0 +1,8 @@ +# package +github.com/rancher/norman + +k8s.io/kubernetes v1.12.1-lite5 https://github.com/ibuildthecloud/k3s.git transitive=true,staging=true +github.com/maruel/panicparse c0182c169410cfa80c7e8f046dad208eaef91338 +bitbucket.org/ww/goautoneg a547fc61f48d567d5b4ec6f8aee5573d8efce11d https://github.com/rancher/goautoneg.git +golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 +github.com/gorilla/mux v1.6.1 diff --git a/vendor/golang.org/x/crypto/.gitattributes b/vendor/golang.org/x/crypto/.gitattributes new file mode 100644 index 00000000..d2f212e5 --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore new file mode 100644 index 00000000..8339fd61 --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md new file mode 100644 index 00000000..d0485e88 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. 
+ +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md new file mode 100644 index 00000000..c9d6fecd --- /dev/null +++ b/vendor/golang.org/x/crypto/README.md @@ -0,0 +1,21 @@ +# Go Cryptography + +This repository holds supplementary Go cryptography libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You +can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the crypto repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the +subject line, so it is easy to find. + +Note that contributions to the cryptography package receive additional scrutiny +due to their sensitive nature. Patches may take longer than normal to receive +feedback. diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/golang.org/x/crypto/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index 18379a93..9a887598 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -617,7 +617,7 @@ func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { if _, err = w.Write(crlf); err != nil { return n, err } - n += 1 + n++ buf = buf[1:] } } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go index e7404ff4..731c89a2 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -17,44 +17,41 @@ package terminal // import "golang.org/x/crypto/ssh/terminal" import ( - "syscall" - "unsafe" - "golang.org/x/sys/unix" ) // State contains the state of a terminal. type State struct { - termios syscall.Termios + termios unix.Termios } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { return nil, err } - newState := oldState.termios + oldState := State{termios: *termios} + // This attempts to replicate the behaviour documented for cfmakeraw in // the termios(3) manpage. 
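+	// Roughly: stop input translation and flow control (Iflag), output
+	// post-processing (Oflag), echo/canonical mode/signals (Lflag), force
+	// 8-bit characters (Cflag), and read a single byte at a time with no
+	// timeout (VMIN=1, VTIME=0).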
- newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newState.Cflag &^= syscall.CSIZE | syscall.PARENB - newState.Cflag |= syscall.CS8 - newState.Cc[unix.VMIN] = 1 - newState.Cc[unix.VTIME] = 0 - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { return nil, err } @@ -64,60 +61,54 @@ func MakeRaw(fd int) (*State, error) { // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { return nil, err } - return &oldState, nil + return &State{termios: *termios}, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 { - return err - } - return nil + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { - var dimensions [4]uint16 - - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { return -1, -1, err } - return int(dimensions[1]), int(dimensions[0]), nil + return int(ws.Col), int(ws.Row), nil } // passwordReader is an io.Reader that reads from a specific file descriptor. type passwordReader int func (r passwordReader) Read(buf []byte) (int, error) { - return syscall.Read(int(r), buf) + return unix.Read(int(r), buf) } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. 
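+// A typical call site (sketch; assumes stdin is a terminal):
+//
+//	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))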
func ReadPassword(fd int) ([]byte, error) { - var oldState syscall.Termios - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { return nil, err } - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { return nil, err } - defer func() { - syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) - }() + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) return readPasswordLine(passwordReader(fd)) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index a2e1b57d..9e41b9f4 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -14,7 +14,7 @@ import ( // State contains the state of a terminal. type State struct { - state *unix.Termios + termios unix.Termios } // IsTerminal returns true if the given file descriptor is a terminal. @@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) { // restored. // see http://cr.illumos.org/~webrev/andy_js/1060/ func MakeRaw(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - oldTermios := *oldTermiosPtr - newTermios := oldTermios - newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - newTermios.Oflag &^= syscall.OPOST - newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB - newTermios.Cflag |= syscall.CS8 - newTermios.Cc[unix.VMIN] = 1 - newTermios.Cc[unix.VTIME] = 0 + oldState := State{termios: *termios} - if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil { + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state) + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. 
func GetState(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &State{termios: *termios}, nil } // GetSize returns the dimensions of the given terminal. diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 60979ccd..8618955d 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -17,6 +17,8 @@ package terminal import ( + "os" + "golang.org/x/sys/windows" ) @@ -71,13 +73,6 @@ func GetSize(fd int) (width, height int, err error) { return int(info.Size.X), int(info.Size.Y), nil } -// passwordReader is an io.Reader that reads from a specific Windows HANDLE. -type passwordReader int - -func (r passwordReader) Read(buf []byte) (int, error) { - return windows.Read(windows.Handle(r), buf) -} - // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. @@ -94,9 +89,15 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - windows.SetConsoleMode(windows.Handle(fd), old) - }() + defer windows.SetConsoleMode(windows.Handle(fd), old) - return readPasswordLine(passwordReader(fd)) + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) } diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes new file mode 100644 index 00000000..d2f212e5 --- /dev/null +++ b/vendor/golang.org/x/net/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore new file mode 100644 index 00000000..8339fd61 --- /dev/null +++ b/vendor/golang.org/x/net/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md new file mode 100644 index 00000000..88dff59b --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. 
+ +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README new file mode 100644 index 00000000..6b13d8e5 --- /dev/null +++ b/vendor/golang.org/x/net/README @@ -0,0 +1,3 @@ +This repository holds supplementary Go networking libraries. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 00000000..fa139db2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - tip + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..46aa2b12 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 00000000..d02f24fd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000..b0ddf3c1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,74 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+
+## Contributing
+
+We appreciate your help!
+
+To contribute, please read the contribution guidelines:
+	https://golang.org/doc/contribute.html
+
+Note that the Go project does not use GitHub pull requests but
+uses Gerrit for code reviews. See the contribution guide for details.
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 00000000..8962c49d
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 00000000..e31541b3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": {}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
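+			// Only ";" comments are recognized here; a "#" line
+			// without an "=" simply fails the key/value split below
+			// and is skipped.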
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 00000000..018b58ad
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
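+//
+// A representative success body (shape assumed for illustration):
+//
+//	{"access_token": "ya29...", "token_type": "Bearer", "expires_in": 3600}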
+type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".force.com", + ".okta.com", + ".oktapreview.com", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. 
+ return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(clientID, clientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000..f1f173e3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. 
If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..3e4835d7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,340 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. 
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to redirect users going through
+	// the OAuth flow, after the resource owner's URLs.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
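+//
+// (reuseTokenSource is unexported; callers obtain one via Config.TokenSource
+// or ReuseTokenSource rather than constructing it directly.)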
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := internal.ContextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: internal.ErrorTransport{Err: err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 00000000..7a3167f1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
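[Editor's note: as an aside before the rest of the Token definition, the interplay between StaticTokenSource, ReuseTokenSource, and Token.Valid above can be sketched in a few runnable lines; the access token is a placeholder.]

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	// A static source never refreshes; suitable only for tokens that
	// never expire (e.g. personal access tokens).
	static := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "SECRET"})

	// ReuseTokenSource caches the token it gets from the wrapped source
	// and asks again only once the cached token stops being Valid().
	ts := oauth2.ReuseTokenSource(nil, static)

	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Valid()) // true: non-empty AccessToken, zero Expiry

	// NewClient wires the source into an *http.Client via Transport.
	client := oauth2.NewClient(context.Background(), ts)
	_ = client
}
```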
+// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. 
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..92ac7e25
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired,
+// it tries to refresh or fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md new file mode 100644 index 00000000..88dff59b --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 00000000..1f8436cc --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/codereview.cfg b/vendor/golang.org/x/sync/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/golang.org/x/sync/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/.gitattributes b/vendor/golang.org/x/sys/.gitattributes new file mode 100644 index 00000000..d2f212e5 --- /dev/null +++ b/vendor/golang.org/x/sys/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. 
+#
+# See golang.org/issue/9281
+
+* -text
diff --git a/vendor/golang.org/x/sys/.gitignore b/vendor/golang.org/x/sys/.gitignore
new file mode 100644
index 00000000..8339fd61
--- /dev/null
+++ b/vendor/golang.org/x/sys/.gitignore
@@ -0,0 +1,2 @@
+# Add no patterns to .gitignore except for files generated by the build.
+last-change
diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md
new file mode 100644
index 00000000..88dff59b
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md
new file mode 100644
index 00000000..ef6c9e59
--- /dev/null
+++ b/vendor/golang.org/x/sys/README.md
@@ -0,0 +1,18 @@
+# sys
+
+This repository holds supplemental Go packages for low-level interactions with
+the operating system.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/sys`. You can
+also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the sys repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the
+subject line, so it is easy to find.
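[Editor's note: returning to the oauth2 transport.go added above. Transport is rarely used directly, but it is the right tool when the base RoundTripper must be customized. A minimal sketch; the API host and token are placeholders.]

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "SECRET"})
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src,
			// Base defaults to http.DefaultTransport when nil; set it
			// explicitly to customize proxies, TLS, timeouts, etc.
			Base: http.DefaultTransport,
		},
	}
	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Each request received an "Authorization: Bearer SECRET" header,
	// set via Token.SetAuthHeader on a clone of the request.
}
```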
diff --git a/vendor/golang.org/x/sys/codereview.cfg b/vendor/golang.org/x/sys/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/golang.org/x/sys/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl b/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl b/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/golang.org/x/text/.gitattributes b/vendor/golang.org/x/text/.gitattributes new file mode 100644 index 00000000..d2f212e5 --- /dev/null +++ b/vendor/golang.org/x/text/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/text/.gitignore b/vendor/golang.org/x/text/.gitignore new file mode 100644 index 00000000..b2de568b --- /dev/null +++ b/vendor/golang.org/x/text/.gitignore @@ -0,0 +1,6 @@ +# Add no patterns to .gitignore except for files generated by the build. +last-change +/DATA +# This file is rather large and the tests really only need to be run +# after generation. +/unicode/norm/data_test.go \ No newline at end of file diff --git a/vendor/golang.org/x/text/CONTRIBUTING.md b/vendor/golang.org/x/text/CONTRIBUTING.md new file mode 100644 index 00000000..88dff59b --- /dev/null +++ b/vendor/golang.org/x/text/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. 
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/text/README b/vendor/golang.org/x/text/README
new file mode 100644
index 00000000..4826fe8f
--- /dev/null
+++ b/vendor/golang.org/x/text/README
@@ -0,0 +1,23 @@
+This repository holds supplementary Go libraries for text processing, many involving Unicode.
+
+To submit changes to this repository, see http://golang.org/doc/contribute.html.
+
+To generate the tables in this repository (except for the encoding tables),
+run go generate from this directory. By default tables are generated for the
+Unicode version in core and the CLDR version defined in
+golang.org/x/text/unicode/cldr.
+
+Running go generate will as a side effect create a DATA subdirectory in this
+directory which holds all files that are used as a source for generating the
+tables. This directory will also serve as a cache.
+
+Run
+
+ go test ./...
+
+from this directory to run all tests. Add the "-tags icu" flag to also run
+ICU conformance tests (if available). This requires that you have the correct
+ICU version installed on your system.
+
+TODO:
+- updating unversioned source files.
\ No newline at end of file
diff --git a/vendor/golang.org/x/text/codereview.cfg b/vendor/golang.org/x/text/codereview.cfg
new file mode 100644
index 00000000..3f8b14b6
--- /dev/null
+++ b/vendor/golang.org/x/text/codereview.cfg
@@ -0,0 +1 @@
+issuerepo: golang/go
diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go
new file mode 100644
index 00000000..d7031b69
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/gen/code.go
@@ -0,0 +1,351 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "io"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// This file contains utilities for generating code.
+
+// TODO: other write methods like:
+// - slices, maps, types, etc.
+
+// CodeWriter is a utility for writing structured code. It computes the content
+// hash and size of written content. It ensures there are newlines between
+// written code blocks.
+type CodeWriter struct {
+ buf bytes.Buffer
+ Size int
+ Hash hash.Hash32 // content hash
+ gob *gob.Encoder
+ // For comments we skip the usual one-line separator if they are followed by
+ // a code block.
+ skipSep bool
+}
+
+func (w *CodeWriter) Write(p []byte) (n int, err error) {
+ return w.buf.Write(p)
+}
+
+// NewCodeWriter returns a new CodeWriter.
+func NewCodeWriter() *CodeWriter {
+ h := fnv.New32()
+ return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
+}
+
+// WriteGoFile appends the total size of all created structures to the buffer
+// and writes it as a Go file to the given file with the given package name.
+func (w *CodeWriter) WriteGoFile(filename, pkg string) {
+ f, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer f.Close()
+ if _, err = w.WriteGo(f, pkg); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+// WriteGo appends the total size of all created structures to the buffer and
+// writes it as a Go file to the given writer with the given package name.
+func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
+ sz := w.Size
+ w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
+ defer w.buf.Reset()
+ return WriteGo(out, pkg, w.buf.Bytes())
+}
+
+func (w *CodeWriter) printf(f string, x ...interface{}) {
+ fmt.Fprintf(w, f, x...)
+}
+
+func (w *CodeWriter) insertSep() {
+ if w.skipSep {
+ w.skipSep = false
+ return
+ }
+ // Use at least two newlines to ensure a blank line after the previous
+ // block. WriteGoFile will remove extraneous newlines.
+ w.printf("\n\n")
+}
+
+// WriteComment writes a comment block. All line starts are prefixed with "//".
+// Initial empty lines are gobbled. The indentation for the first line is
+// stripped from consecutive lines.
+func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
+ s := fmt.Sprintf(comment, args...)
+ s = strings.Trim(s, "\n")
+
+ // Use at least two newlines to ensure a blank line after the previous
+ // block. WriteGoFile will remove extraneous newlines.
+ w.printf("\n\n// ")
+ w.skipSep = true
+
+ // strip first indent level.
+ sep := "\n"
+ for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
+ sep += s[:1]
+ }
+
+ strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
+
+ w.printf("\n")
+}
+
+func (w *CodeWriter) writeSizeInfo(size int) {
+ w.printf("// Size: %d bytes\n", size)
+}
+
+// WriteConst writes a constant of the given name and value.
+func (w *CodeWriter) WriteConst(name string, x interface{}) {
+ w.insertSep()
+ v := reflect.ValueOf(x)
+
+ switch v.Type().Kind() {
+ case reflect.String:
+ w.printf("const %s %s = ", name, typeName(x))
+ w.WriteString(v.String())
+ w.printf("\n")
+ default:
+ w.printf("const %s = %#v\n", name, x)
+ }
+}
+
+// WriteVar writes a variable of the given name and value.
+func (w *CodeWriter) WriteVar(name string, x interface{}) {
+ w.insertSep()
+ v := reflect.ValueOf(x)
+ oldSize := w.Size
+ sz := int(v.Type().Size())
+ w.Size += sz
+
+ switch v.Type().Kind() {
+ case reflect.String:
+ w.printf("var %s %s = ", name, typeName(x))
+ w.WriteString(v.String())
+ case reflect.Struct:
+ w.gob.Encode(x)
+ fallthrough
+ case reflect.Slice, reflect.Array:
+ w.printf("var %s = ", name)
+ w.writeValue(v)
+ w.writeSizeInfo(w.Size - oldSize)
+ default:
+ w.printf("var %s %s = ", name, typeName(x))
+ w.gob.Encode(x)
+ w.writeValue(v)
+ w.writeSizeInfo(w.Size - oldSize)
+ }
+ w.printf("\n")
+}
+
+func (w *CodeWriter) writeValue(v reflect.Value) {
+ x := v.Interface()
+ switch v.Kind() {
+ case reflect.String:
+ w.WriteString(v.String())
+ case reflect.Array:
+ // Don't double count: callers of WriteArray count on the size being
+ // added, so we need to discount it here.
+ w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + s = strings.Replace(s, `\`, `\\`, -1) + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. + b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. 
+ nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. +func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 00000000..2acb0355 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. 
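[Editor's note: before the gen package's code begins, a short sketch of how a typical generator might drive the CodeWriter from code.go above. The file name "tables.go", package "mypkg", and table contents are made up; since the package is internal, such a generator could only live inside the x/text repository.]

```go
package main

import "golang.org/x/text/internal/gen"

func main() {
	w := gen.NewCodeWriter()
	// WriteGoFile runs last: it appends a total-size/checksum comment,
	// prepends the standard header, gofmts everything, and writes the file.
	defer w.WriteGoFile("tables.go", "mypkg")

	w.WriteComment("lookup maps tag indices to canonical names.")
	w.WriteConst("numTags", 3)                // emits: const numTags = 3
	w.WriteVar("lookup", []uint16{0, 10, 20}) // emits the slice plus a Size comment
}
```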
+package gen // import "golang.org/x/text/internal/gen"
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/build"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+ "unicode"
+
+ "golang.org/x/text/unicode/cldr"
+)
+
+var (
+ url = flag.String("url",
+ "http://www.unicode.org/Public",
+ "URL of Unicode database directory")
+ iana = flag.String("iana",
+ "http://www.iana.org",
+ "URL of the IANA repository")
+ unicodeVersion = flag.String("unicode",
+ getEnv("UNICODE_VERSION", unicode.Version),
+ "unicode version to use")
+ cldrVersion = flag.String("cldr",
+ getEnv("CLDR_VERSION", cldr.Version),
+ "cldr version to use")
+)
+
+func getEnv(name, def string) string {
+ if v := os.Getenv(name); v != "" {
+ return v
+ }
+ return def
+}
+
+// Init performs common initialization for a gen command. It parses the flags
+// and sets up the standard logging parameters.
+func Init() {
+ log.SetPrefix("")
+ log.SetFlags(log.Lshortfile)
+ flag.Parse()
+}
+
+const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package %s
+
+`
+
+// UnicodeVersion reports the requested Unicode version.
+func UnicodeVersion() string {
+ return *unicodeVersion
+}
+
+// CLDRVersion reports the requested CLDR version.
+func CLDRVersion() string {
+ return *cldrVersion
+}
+
+// IsLocal reports whether data files are available locally.
+func IsLocal() bool {
+ dir, err := localReadmeFile()
+ if err != nil {
+ return false
+ }
+ if _, err = os.Stat(dir); err != nil {
+ return false
+ }
+ return true
+}
+
+// OpenUCDFile opens the requested UCD file. The file is specified relative to
+// the public Unicode root directory. It will call log.Fatal if there are any
+// errors.
+func OpenUCDFile(file string) io.ReadCloser {
+ return openUnicode(path.Join(*unicodeVersion, "ucd", file))
+}
+
+// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
+// are any errors.
+func OpenCLDRCoreZip() io.ReadCloser {
+ return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
+}
+
+// OpenUnicodeFile opens the requested file of the requested category from the
+// root of the Unicode data archive. The file is specified relative to the
+// public Unicode root directory. If version is "", it will use the default
+// Unicode version. It will call log.Fatal if there are any errors.
+func OpenUnicodeFile(category, version, file string) io.ReadCloser {
+ if version == "" {
+ version = UnicodeVersion()
+ }
+ return openUnicode(path.Join(category, version, file))
+}
+
+// OpenIANAFile opens the requested IANA file. The file is specified relative
+// to the IANA root, which is typically either http://www.iana.org or the
+// iana directory in the local mirror. It will call log.Fatal if there are any
+// errors.
+func OpenIANAFile(path string) io.ReadCloser {
+ return Open(*iana, "iana", path)
+}
+
+var (
+ dirMutex sync.Mutex
+ localDir string
+)
+
+const permissions = 0755
+
+func localReadmeFile() (string, error) {
+ p, err := build.Import("golang.org/x/text", "", build.FindOnly)
+ if err != nil {
+ return "", fmt.Errorf("Could not locate package: %v", err)
+ }
+ return filepath.Join(p.Dir, "DATA", "README"), nil
+}
+
+func getLocalDir() string {
+ dirMutex.Lock()
+ defer dirMutex.Unlock()
+
+ readme, err := localReadmeFile()
+ if err != nil {
+ log.Fatal(err)
+ }
+ dir := filepath.Dir(readme)
+ if _, err := os.Stat(readme); err != nil {
+ if err := os.MkdirAll(dir, permissions); err != nil {
+ log.Fatalf("Could not create directory: %v", err)
+ }
+ ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
+ }
+ return dir
+}
+
+const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
+
+This directory contains downloaded files used to generate the various tables
+in the golang.org/x/text subrepo.
+
+Note that the language subtag repo (iana/assignments/language-subtag-registry)
+and all other files in the iana subdirectory are not versioned and will need
+to be periodically manually updated. The easiest way to do this is to remove
+the entire iana directory. This is mostly of concern when updating the language
+package.
+`
+
+// Open opens subdir/path if a local directory is specified and the file exists,
+// where subdir is a directory relative to the local root, or fetches it from
+// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
+func Open(urlRoot, subdir, path string) io.ReadCloser {
+ file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
+ return open(file, urlRoot, path)
+}
+
+func openUnicode(path string) io.ReadCloser {
+ file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
+ return open(file, *url, path)
+}
+
+// TODO: automatically periodically update non-versioned files.
+
+func open(file, urlRoot, path string) io.ReadCloser {
+ if f, err := os.Open(file); err == nil {
+ return f
+ }
+ r := get(urlRoot, path)
+ defer r.Close()
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ log.Fatalf("Could not download file: %v", err)
+ }
+ os.MkdirAll(filepath.Dir(file), permissions)
+ if err := ioutil.WriteFile(file, b, permissions); err != nil {
+ log.Fatalf("Could not create file: %v", err)
+ }
+ return ioutil.NopCloser(bytes.NewReader(b))
+}
+
+func get(root, path string) io.ReadCloser {
+ url := root + "/" + path
+ fmt.Printf("Fetching %s...", url)
+ defer fmt.Println(" done.")
+ resp, err := http.Get(url)
+ if err != nil {
+ log.Fatalf("HTTP GET: %v", err)
+ }
+ if resp.StatusCode != 200 {
+ log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
+ }
+ return resp.Body
+}
+
+// TODO: use Write*Version in all applicable packages.
+
+// WriteUnicodeVersion writes a constant for the Unicode version from which the
+// tables are generated.
+func WriteUnicodeVersion(w io.Writer) {
+ fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
+ fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
+}
+
+// WriteCLDRVersion writes a constant for the CLDR version from which the
+// tables are generated.
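[Editor's note: a sketch of the fetch-and-cache behavior of Init/OpenUCDFile above, as a generator's main would use it. "UnicodeData.txt" is a real UCD file name; the parsing is elided, and again this only compiles inside x/text because the package is internal.]

```go
package main

import (
	"bufio"
	"log"

	"golang.org/x/text/internal/gen"
)

func main() {
	gen.Init() // parse -url, -iana, -unicode, -cldr; set up logging

	// The first run downloads into the DATA cache directory; later runs
	// read the cached copy.
	r := gen.OpenUCDFile("UnicodeData.txt")
	defer r.Close()

	s := bufio.NewScanner(r)
	for s.Scan() {
		// each line is one semicolon-separated UCD record
	}
	if err := s.Err(); err != nil {
		log.Fatal(err)
	}
	log.Println("tables generated for Unicode", gen.UnicodeVersion())
}
```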
+func WriteCLDRVersion(w io.Writer) {
+ fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
+ fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
+}
+
+// WriteGoFile prepends a standard file comment and package statement to the
+// given bytes, applies gofmt, and writes them to a file with the given name.
+// It will call log.Fatal if there are any errors.
+func WriteGoFile(filename, pkg string, b []byte) {
+ w, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer w.Close()
+ if _, err = WriteGo(w, pkg, b); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+// WriteGo prepends a standard file comment and package statement to the given
+// bytes, applies gofmt, and writes them to w.
+func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
+ src := []byte(fmt.Sprintf(header, pkg))
+ src = append(src, b...)
+ formatted, err := format.Source(src)
+ if err != nil {
+ // Print the generated code even in case of an error so that the
+ // returned error can be meaningfully interpreted.
+ n, _ = w.Write(src)
+ return n, err
+ }
+ return w.Write(formatted)
+}
+
+// Repackage rewrites a Go file from belonging to package main to belonging to
+// the given package.
+func Repackage(inFile, outFile, pkg string) {
+ src, err := ioutil.ReadFile(inFile)
+ if err != nil {
+ log.Fatalf("reading %s: %v", inFile, err)
+ }
+ const toDelete = "package main\n\n"
+ i := bytes.Index(src, []byte(toDelete))
+ if i < 0 {
+ log.Fatalf("Could not find %q in %s.", toDelete, inFile)
+ }
+ w := &bytes.Buffer{}
+ w.Write(src[i+len(toDelete):])
+ WriteGoFile(outFile, pkg, w.Bytes())
+}
diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go
new file mode 100644
index 00000000..397b975c
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/triegen/compact.go
@@ -0,0 +1,58 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package triegen
+
+// This file defines Compacter and its implementations.
+
+import "io"
+
+// A Compacter generates an alternative, more space-efficient way to store a
+// trie value block. A trie value block holds all possible values for the last
+// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
+// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
+type Compacter interface {
+ // Size reports whether the Compacter can encode the given block and,
+ // if so, the size of the encoding. len(v) is always 64.
+ Size(v []uint64) (sz int, ok bool)
+
+ // Store stores the block using the Compacter's compression method.
+ // It returns a handle with which the block can be retrieved.
+ // len(v) is always 64.
+ Store(v []uint64) uint32
+
+ // Print writes the data structures associated with the given store to w.
+ Print(w io.Writer) error
+
+ // Handler returns the name of a function that gets called during trie
+ // lookup for blocks generated by the Compacter. The function should be of
+ // the form func (n uint32, b byte) uint64, where n is the index returned by
+ // the Compacter's Store method and b is the last byte of the UTF-8
+ // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
+ // block.
+ Handler() string
+}
+
+// simpleCompacter is the default Compacter used by builder.
It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. + return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 00000000..8d9f120b --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. 
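[Editor's note: an aside before the rest of print.go. The Compacter interface defined in compact.go above is small, and a toy implementation that compacts all-zero blocks into no table space at all might look like the following. This is illustrative only; in a real Compacter, the lookupZero function named by Handler would have to be emitted by Print.]

```go
package main

import "io"

// zeroCompacter stores all-zero value blocks in zero bytes of table
// space; every such block shares the single handle 0.
type zeroCompacter struct{}

func (zeroCompacter) Size(v []uint64) (sz int, ok bool) {
	for _, x := range v {
		if x != 0 {
			return 0, false // block not representable by this Compacter
		}
	}
	return 0, true // representable in zero bytes
}

func (zeroCompacter) Store(v []uint64) uint32 { return 0 }

// Print would emit supporting tables and the handler function; this toy
// Compacter needs none.
func (zeroCompacter) Print(w io.Writer) error { return nil }

// Handler names a func(n uint32, b byte) uint64 that the generated
// lookupValue calls; here it could simply always return 0.
func (zeroCompacter) Handler() string { return "lookupZero" }

func main() {
	var c zeroCompacter
	sz, ok := c.Size(make([]uint64, 64))
	println(sz, ok) // 0 true
}
```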
+ b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. 
+var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 00000000..adb01081 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
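[Editor's note: before the triegen package documentation below, a hand-written miniature of what the generated 2-byte lookup in the template above does. The tables here are zero-filled dummies rather than generated data, and masking with &0x3F is a simplification of the block-offset scheme the real generator bakes into its index values.]

```go
package main

import "fmt"

var (
	asciiValues [128]uint8          // values for single-byte (ASCII) runes
	starterIdx  [256]uint8          // index block selected by the lead byte
	values      = make([]uint8, 64) // a single 64-entry value block (all zero)
)

func lookup2(s []byte) (v uint8, sz int) {
	c0 := s[0]
	if c0 < 0x80 {
		return asciiValues[c0], 1 // ASCII fast path
	}
	if c0 < 0xC2 || c0 >= 0xE0 || len(s) < 2 {
		return 0, 0 // not a well-formed 2-byte starter, or too short
	}
	c1 := s[1]
	if c1 < 0x80 || 0xC0 <= c1 {
		return 0, 1 // not a continuation byte
	}
	// The index block picks a 64-entry value block; the low 6 bits of
	// the continuation byte pick the entry within it.
	i := starterIdx[c0]
	return values[uint32(i)<<6+uint32(c1&0x3F)], 2
}

func main() {
	v, sz := lookup2([]byte("é")) // 0xC3 0xA9
	fmt.Println(v, sz)            // 0 2 with these all-zero dummy tables
}
```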
+
+// Package triegen implements a code generator for a trie for associating
+// unsigned integer values with UTF-8 encoded runes.
+//
+// Many of the go.text packages use tries for storing per-rune information. A
+// trie is especially useful if many of the runes have the same value. If this
+// is the case, many blocks can be expected to be shared, allowing information
+// on many runes to be stored in little space.
+//
+// As most of the lookups are done directly on []byte slices, the tries use the
+// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
+// runes and contributes a little bit to better performance. It also naturally
+// provides a fast path for ASCII.
+//
+// Space is also an issue. There are many code points defined in Unicode and as
+// a result tables can get quite large. So every byte counts. The triegen
+// package automatically chooses the smallest integer values to represent the
+// tables. Compacters allow further compression of the trie by allowing for
+// alternative representations of individual trie blocks.
+//
+// triegen allows generating multiple tries as a single structure. This is
+// useful when, for example, one wants to generate tries for several languages
+// that have a lot of values in common. Some existing libraries for
+// internationalization store all per-language data as a dynamically loadable
+// chunk. The go.text packages are designed with the assumption that the user
+// typically wants to compile in support for all supported languages, in line
+// with the approach common to Go to create a single standalone binary. The
+// multi-root trie approach can give significant storage savings in this
+// scenario.
+//
+// triegen generates both tables and code. The code is optimized to use the
+// automatically chosen data types. The following code is generated for a Trie
+// or multiple Tries named "foo":
+// - type fooTrie
+// The trie type.
+//
+// - func newFooTrie(x int) *fooTrie
+// Trie constructor, where x is the index of the trie passed to Gen.
+//
+// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
+// The lookup method, where uintX is automatically chosen.
+//
+// - func lookupString, lookupUnsafe and lookupStringUnsafe
+// Variants of the above.
+//
+// - var fooValues and fooIndex and any tables generated by Compacters.
+// The core trie data.
+//
+// - var fooTrieHandles
+// Indexes of starter blocks in case of multiple trie roots.
+//
+// It is recommended that users test the generated trie by checking the returned
+// value for every rune. Such exhaustive tests are possible as the number of
+// runes in Unicode is limited.
+package triegen // import "golang.org/x/text/internal/triegen"
+
+// TODO: Arguably, the internally optimized data types would not have to be
+// exposed in the generated API. We could also investigate not generating the
+// code, but using it through a package. We would have to investigate the impact
+// on performance of making such a change, though. For packages like unicode/norm,
+// small changes like this could tank performance.
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash/crc64"
+ "io"
+ "log"
+ "unicode/utf8"
+)
+
+// builder builds a set of tries for associating values with runes. The set of
+// tries can share common index and value blocks.
+type builder struct {
+ Name string
+
+ // ValueType is the type of the trie values looked up.
+ ValueType string
+
+ // ValueSize is the byte size of the ValueType.
+ ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. +func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. 
+func NewTrie(name string) *Trie {
+	return &Trie{
+		&node{
+			children: make([]*node, blockSize),
+			values:   make([]uint64, utf8.RuneSelf),
+		},
+		hiddenTrie{Name: name},
+	}
+}
+
+// Gen is a convenience wrapper around the Gen function, passing t as the only
+// trie and using the name passed to NewTrie. It returns the size of the
+// generated tables.
+func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
+	return Gen(w, t.Name, []*Trie{t}, opts...)
+}
+
+// node is a node of the intermediate trie structure.
+type node struct {
+	// children holds this node's children. It is always of length 64.
+	// A child node may be nil.
+	children []*node
+
+	// values contains the values of this node. If it is non-nil, this node is
+	// either a root or leaf node:
+	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
+	// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
+	values []uint64
+
+	index nodeIndex
+}
+
+// Insert associates value with the given rune. Insert will panic if a non-zero
+// value is passed for an invalid rune.
+func (t *Trie) Insert(r rune, value uint64) {
+	if value == 0 {
+		return
+	}
+	s := string(r)
+	if []rune(s)[0] != r && value != 0 {
+		// Note: The UCD tables will always assign what amounts to a zero value
+		// to a surrogate. Allowing a zero value for an illegal rune allows
+		// users to iterate over [0..MaxRune] without having to explicitly
+		// exclude surrogates, which would be tedious.
+		panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
+	}
+	if len(s) == 1 {
+		// It is a root node value (ASCII).
+		t.root.values[s[0]] = value
+		return
+	}
+
+	n := t.root
+	for ; len(s) > 1; s = s[1:] {
+		if n.children == nil {
+			n.children = make([]*node, blockSize)
+		}
+		p := s[0] % blockSize
+		c := n.children[p]
+		if c == nil {
+			c = &node{}
+			n.children[p] = c
+		}
+		if len(s) > 2 && c.values != nil {
+			log.Fatalf("triegen: insert(%U): found internal node with values", r)
+		}
+		n = c
+	}
+	if n.values == nil {
+		n.values = make([]uint64, blockSize)
+	}
+	if n.children != nil {
+		log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
+	}
+	n.values[s[0]-0x80] = value
+}
+
+// Size returns the number of bytes the generated trie will take to store. It
+// needs to be exported as it is used in the templates.
+func (b *builder) Size() int {
+	// Index blocks.
+	sz := len(b.IndexBlocks) * blockSize * b.IndexSize
+
+	// Value blocks. The first compaction represents the normal value blocks,
+	// whose size is counted via ValueBlocks (this also covers the ASCII
+	// blocks); the remaining compactions track their own total size.
+	sz += len(b.ValueBlocks) * blockSize * b.ValueSize
+	for _, c := range b.Compactions[1:] {
+		sz += c.totalSize
+	}
+
+	// TODO: this computation does not account for the fixed overhead of using
+	// a compaction, either code or data. The typical data overhead, though, is
+	// on the order of bytes (2 bytes for cases). Further, the savings of using
+	// a compaction should be substantial anyway for it to be worth it.
+
+	// For multi-root tries, we also need to account for the handles.
+	if len(b.Trie) > 1 {
+		sz += 2 * b.IndexSize * len(b.Trie)
+	}
+	return sz
+}
+
+func (b *builder) build() {
+	// Compute the sizes of the values.
+	var vmax uint64
+	for _, t := range b.Trie {
+		vmax = maxValue(t.root, vmax)
+	}
+	b.ValueType, b.ValueSize = getIntType(vmax)
+
+	// Compute all block allocations.
+	// TODO: first compute the ASCII blocks for all tries and then the other
+	// nodes. ASCII blocks are more restricted in placement, as they require
+	// two blocks to be placed consecutively. Processing them first may improve
+	// sharing (at least one zero block can be expected to be saved).
+	for _, t := range b.Trie {
+		b.Checksum += b.buildTrie(t)
+	}
+
+	// Compute the offsets for all the Compacters.
+	offset := uint32(0)
+	for i := range b.Compactions {
+		c := &b.Compactions[i]
+		c.Offset = offset
+		offset += c.maxHandle + 1
+		c.Cutoff = offset
+	}
+
+	// Compute the sizes of indexes.
+	// TODO: different byte positions could have different sizes. So far we
+	// have not found a case where this is beneficial.
+	imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
+	for _, ib := range b.IndexBlocks {
+		if x := uint64(ib.index.index); x > imax {
+			imax = x
+		}
+	}
+	b.IndexType, b.IndexSize = getIntType(imax)
+}
+
+func maxValue(n *node, max uint64) uint64 {
+	if n == nil {
+		return max
+	}
+	for _, c := range n.children {
+		max = maxValue(c, max)
+	}
+	for _, v := range n.values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+func getIntType(v uint64) (string, int) {
+	switch {
+	case v < 1<<8:
+		return "uint8", 1
+	case v < 1<<16:
+		return "uint16", 2
+	case v < 1<<32:
+		return "uint32", 4
+	}
+	return "uint64", 8
+}
+
+const (
+	blockSize = 64
+
+	// Subtract two blocks to offset 0x80, the first continuation byte.
+	blockOffset = 2
+
+	// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
+	rootBlockOffset = 3
+)
+
+var crcTable = crc64.MakeTable(crc64.ISO)
+
+func (b *builder) buildTrie(t *Trie) uint64 {
+	n := t.root
+
+	// Get the ASCII offset. For the first trie, the ASCII block will be at
+	// position 0.
+	hasher := crc64.New(crcTable)
+	binary.Write(hasher, binary.BigEndian, n.values)
+	hash := hasher.Sum64()
+
+	v, ok := b.asciiBlockIdx[hash]
+	if !ok {
+		v = len(b.ValueBlocks)
+		b.asciiBlockIdx[hash] = v
+
+		b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
+		if v == 0 {
+			// Add the zero block at position 2 so that it will be assigned a
+			// zero reference in the lookup blocks.
+			// TODO: always do this? This would allow us to remove a check from
+			// the trie lookup, but at the expense of extra space. Analyze
+			// performance for unicode/norm.
+			b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
+		}
+	}
+	t.ASCIIIndex = v
+
+	// Compute remaining offsets.
+	t.Checksum = b.computeOffsets(n, true)
+	// We already subtracted the normal blockOffset from the index. Subtract the
+	// difference for starter bytes.
+	t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
+	return t.Checksum
+}
+
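The "smallest integer values" promise in the package comment comes down to
getIntType above. A quick illustration with invented inputs (not taken from
the source):

    getIntType(0xFF)    // "uint8", 1
    getIntType(0x100)   // "uint16", 2
    getIntType(1 << 20) // "uint32", 4
    getIntType(1 << 40) // "uint64", 8

+func (b *builder) computeOffsets(n *node, root bool) uint64 {
+	// For the first trie, the root lookup block will be at position 3, which is
+	// the offset for UTF-8 non-ASCII starter bytes.
+	first := len(b.IndexBlocks) == rootBlockOffset
+	if first {
+		b.IndexBlocks = append(b.IndexBlocks, n)
+	}
+
+	// We special-case the cases where all values recursively are 0. This allows
+	// for the use of a zero block to which all such values can be directed.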
+	hash := uint64(0)
+	if n.children != nil || n.values != nil {
+		hasher := crc64.New(crcTable)
+		for _, c := range n.children {
+			var v uint64
+			if c != nil {
+				v = b.computeOffsets(c, false)
+			}
+			binary.Write(hasher, binary.BigEndian, v)
+		}
+		binary.Write(hasher, binary.BigEndian, n.values)
+		hash = hasher.Sum64()
+	}
+
+	if first {
+		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
+	}
+
+	// Compacters don't apply to internal nodes.
+	if n.children != nil {
+		v, ok := b.indexBlockIdx[hash]
+		if !ok {
+			v = len(b.IndexBlocks) - blockOffset
+			b.IndexBlocks = append(b.IndexBlocks, n)
+			b.indexBlockIdx[hash] = v
+		}
+		n.index = nodeIndex{0, v}
+	} else {
+		h, ok := b.valueBlockIdx[hash]
+		if !ok {
+			bestI, bestSize := 0, blockSize*b.ValueSize
+			for i, c := range b.Compactions[1:] {
+				if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
+					bestI, bestSize = i+1, sz
+				}
+			}
+			c := &b.Compactions[bestI]
+			c.totalSize += bestSize
+			v := c.c.Store(n.values)
+			if c.maxHandle < v {
+				c.maxHandle = v
+			}
+			h = nodeIndex{bestI, int(v)}
+			b.valueBlockIdx[hash] = h
+		}
+		n.index = h
+	}
+	return hash
+}
diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go
new file mode 100644
index 00000000..309e8d8b
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/ucd/ucd.go
@@ -0,0 +1,376 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ucd provides a parser for Unicode Character Database files, the
+// format of which is defined in http://www.unicode.org/reports/tr44/. See
+// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
+//
+// It currently does not support substitutions of missing fields.
+package ucd // import "golang.org/x/text/internal/ucd"
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"log"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// UnicodeData.txt fields.
+const (
+	CodePoint = iota
+	Name
+	GeneralCategory
+	CanonicalCombiningClass
+	BidiClass
+	DecompMapping
+	DecimalValue
+	DigitValue
+	NumericValue
+	BidiMirrored
+	Unicode1Name
+	ISOComment
+	SimpleUppercaseMapping
+	SimpleLowercaseMapping
+	SimpleTitlecaseMapping
+)
+
+// Parse calls f for each entry in the given reader of a UCD file. It will
+// close the reader upon return. It calls log.Fatal if any error occurs.
+//
+// This implements the most common usage pattern for Parser.
+func Parse(r io.ReadCloser, f func(p *Parser)) {
+	defer r.Close()
+
+	p := New(r)
+	for p.Next() {
+		f(p)
+	}
+	if err := p.Err(); err != nil {
+		r.Close() // os.Exit will cause defers not to be called.
+		log.Fatal(err)
+	}
+}
+
+// An Option is used to configure a Parser.
+type Option func(p *Parser)
+
+func keepRanges(p *Parser) {
+	p.keepRanges = true
+}
+
+var (
+	// KeepRanges prevents the expansion of ranges. The raw ranges can be
+	// obtained by calling Range(0) on the parser.
+	KeepRanges Option = keepRanges
+)
+
+// The Part option registers a handler for lines starting with a '@'. The text
+// after a '@' is available as the first field. Comments are handled as usual.
+func Part(f func(p *Parser)) Option {
+	return func(p *Parser) {
+		p.partHandler = f
+	}
+}
+
+// The CommentHandler option passes comments that appear on a line by
+// themselves to the given handler.
+func CommentHandler(f func(s string)) Option {
+	return func(p *Parser) {
+		p.commentHandler = f
+	}
+}
+
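A hedged sketch (not part of the vendored file) of the usage pattern that
Parse implements; the file name is an invented local path:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/text/internal/ucd"
    )

    func main() {
        f, err := os.Open("UnicodeData.txt") // hypothetical local copy
        if err != nil {
            panic(err)
        }
        // Parse closes f and calls log.Fatal on parse errors.
        ucd.Parse(f, func(p *ucd.Parser) {
            fmt.Printf("%U %s\n", p.Rune(ucd.CodePoint), p.String(ucd.Name))
        })
    }

+// A Parser parses Unicode Character Database (UCD) files.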
+type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. + + err error + comment []byte + field [][]byte + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *Parser) getField(i int) []byte { + if i >= len(p.field) { + return nil + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = nil + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() { + b := p.scanner.Bytes() + if len(b) == 0 { + continue + } + if b[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(string(b[1:]))) + } + continue + } + + // Parse line + if i := bytes.IndexByte(b, '#'); i != -1 { + p.comment = bytes.TrimSpace(b[i+1:]) + b = b[:i] + } + if b[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, bytes.TrimSpace(b[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = nil + continue + } + for { + i := bytes.IndexByte(b, ';') + if i == -1 { + p.field = append(p.field, bytes.TrimSpace(b)) + break + } + p.field = append(p.field, bytes.TrimSpace(b[:i])) + b = b[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err()) + return false +} + +func parseRune(b []byte) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(string(b), 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(b []byte) rune { + x, err := parseRune(b) + p.setError(err) + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(b []byte) { + if b = bytes.TrimSpace(b); len(b) > 0 { + runes = append(runes, p.parseRune(b)) + } + } + for b := p.getField(i); ; { + i := bytes.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
+func (p *Parser) Range(i int) (first, last rune) {
+	if !p.keepRanges {
+		return p.rangeStart, p.rangeStart
+	}
+	return p.getRange(i)
+}
+
+func (p *Parser) getRange(i int) (first, last rune) {
+	b := p.getField(i)
+	if k := bytes.Index(b, []byte("..")); k != -1 {
+		return p.parseRune(b[:k]), p.parseRune(b[k+2:])
+	}
+	// The first field may not be a rune, in which case we may ignore any error
+	// and set the range as 0..0.
+	x, err := parseRune(b)
+	if err != nil {
+		// Disable range parsing henceforth. This ensures that an error will be
+		// returned if the user subsequently tries to parse this field as
+		// a Rune.
+		p.keepRanges = true
+	}
+	// Special case for UnicodeData that was retained for backwards compatibility.
+	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
+		if p.parsedRange {
+			return p.rangeStart, p.rangeEnd
+		}
+		mf := reRange.FindStringSubmatch(p.scanner.Text())
+		if mf == nil || !p.scanner.Scan() {
+			p.setError(errIncorrectLegacyRange)
+			return x, x
+		}
+		// Using Bytes would be more efficient here, but Text is a lot easier
+		// and this is not a frequent case.
+		ml := reRange.FindStringSubmatch(p.scanner.Text())
+		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
+			p.setError(errIncorrectLegacyRange)
+			return x, x
+		}
+		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
+		p.parsedRange = true
+		return p.rangeStart, p.rangeEnd
+	}
+	return x, x
+}
+
+// bools recognizes all valid UCD boolean values.
+var bools = map[string]bool{
+	"":      false,
+	"N":     false,
+	"No":    false,
+	"F":     false,
+	"False": false,
+	"Y":     true,
+	"Yes":   true,
+	"T":     true,
+	"True":  true,
+}
+
+// Bool parses and returns field i as a boolean value.
+func (p *Parser) Bool(i int) bool {
+	b := p.getField(i)
+	for s, v := range bools {
+		if bstrEq(b, s) {
+			return v
+		}
+	}
+	p.setError(strconv.ErrSyntax)
+	return false
+}
+
+// Int parses and returns field i as an integer value.
+func (p *Parser) Int(i int) int {
+	x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
+	p.setError(err)
+	return int(x)
+}
+
+// Uint parses and returns field i as an unsigned integer value.
+func (p *Parser) Uint(i int) uint {
+	x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
+	p.setError(err)
+	return uint(x)
+}
+
+// Float parses and returns field i as a decimal value.
+func (p *Parser) Float(i int) float64 {
+	x, err := strconv.ParseFloat(string(p.getField(i)), 64)
+	p.setError(err)
+	return x
+}
+
+// String parses and returns field i as a string value.
+func (p *Parser) String(i int) string {
+	return string(p.getField(i))
+}
+
+// Strings parses and returns field i as a space-separated list of strings.
+func (p *Parser) Strings(i int) []string {
+	ss := strings.Split(string(p.getField(i)), " ")
+	for i, s := range ss {
+		ss[i] = strings.TrimSpace(s)
+	}
+	return ss
+}
+
+// Comment returns the comments for the current line.
+func (p *Parser) Comment() string {
+	return string(p.comment)
+}
+
+var errUndefinedEnum = errors.New("ucd: undefined enum value")
+
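The interplay between KeepRanges and Range above is subtle, so here is a
hedged sketch (not part of the vendored file) that reads a raw range; the
single-line input is fabricated:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/text/internal/ucd"
    )

    func main() {
        const data = "3400..4DBF ; Ideographic # fabricated input\n"
        p := ucd.New(strings.NewReader(data), ucd.KeepRanges)
        for p.Next() {
            first, last := p.Range(0)
            fmt.Printf("%U..%U %s\n", first, last, p.String(1))
        }
        if err := p.Err(); err != nil {
            panic(err)
        }
    }

+// Enum interprets and returns field i as a value that must be one of the
+// values in enum.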
+func (p *Parser) Enum(i int, enum ...string) string {
+	b := p.getField(i)
+	for _, s := range enum {
+		if bstrEq(b, s) {
+			return s
+		}
+	}
+	p.setError(errUndefinedEnum)
+	return ""
+}
+
+func bstrEq(b []byte, s string) bool {
+	if len(b) != len(s) {
+		return false
+	}
+	for i, c := range b {
+		if c != s[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go
new file mode 100644
index 00000000..2382f4d6
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/base.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+	"encoding/xml"
+	"regexp"
+	"strconv"
+)
+
+// Elem is implemented by every XML element.
+type Elem interface {
+	setEnclosing(Elem)
+	setName(string)
+	enclosing() Elem
+
+	GetCommon() *Common
+}
+
+type hidden struct {
+	CharData string `xml:",chardata"`
+	Alias    *struct {
+		Common
+		Source string `xml:"source,attr"`
+		Path   string `xml:"path,attr"`
+	} `xml:"alias"`
+	Def *struct {
+		Common
+		Choice string `xml:"choice,attr,omitempty"`
+		Type   string `xml:"type,attr,omitempty"`
+	} `xml:"default"`
+}
+
+// Common holds several of the most common attributes and sub-elements
+// of an XML element.
+type Common struct {
+	XMLName         xml.Name
+	name            string
+	enclElem        Elem
+	Type            string `xml:"type,attr,omitempty"`
+	Reference       string `xml:"reference,attr,omitempty"`
+	Alt             string `xml:"alt,attr,omitempty"`
+	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
+	Draft           string `xml:"draft,attr,omitempty"`
+	hidden
+}
+
+// Default returns the default type to select from the enclosed list
+// or "" if no default value is specified.
+func (e *Common) Default() string {
+	if e.Def == nil {
+		return ""
+	}
+	if e.Def.Choice != "" {
+		return e.Def.Choice
+	} else if e.Def.Type != "" {
+		// Type is still used by the default element in collation.
+		return e.Def.Type
+	}
+	return ""
+}
+
+// GetCommon returns e. It is provided such that Common implements Elem.
+func (e *Common) GetCommon() *Common {
+	return e
+}
+
+// Data returns the character data accumulated for this element.
+func (e *Common) Data() string {
+	e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
+	return e.CharData
+}
+
+func (e *Common) setName(s string) {
+	e.name = s
+}
+
+func (e *Common) enclosing() Elem {
+	return e.enclElem
+}
+
+func (e *Common) setEnclosing(en Elem) {
+	e.enclElem = en
+}
+
+// charRe matches the escape sequences (XML character references and
+// backslash escapes) that can be replaced in place without further escaping
+// of the string.
+var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
+
+// replaceUnicode converts hexadecimal Unicode codepoint notations to a
+// one-rune string. It assumes the input string is correctly formatted.
+func replaceUnicode(s string) string {
+	if s[1] == '#' {
+		r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
+		return string(rune(r))
+	}
+	r, _, _, _ := strconv.UnquoteChar(s, 0)
+	return string(r)
+}
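To make the escape handling above concrete, a hedged, self-contained sketch
(not part of the vendored files) that mirrors the charRe/replaceUnicode pair
on a fabricated input, reduced to two of the escape forms:

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
    )

    // Mirrors the unexported helpers above, for illustration only.
    var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}`)

    func replaceUnicode(s string) string {
        if s[1] == '#' {
            r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
            return string(rune(r))
        }
        r, _, _, _ := strconv.UnquoteChar(s, 0)
        return string(r)
    }

    func main() {
        in := `&#x48;ello \u00E9` // fabricated CLDR-style character data
        fmt.Println(charRe.ReplaceAllStringFunc(in, replaceUnicode))
        // Prints: Hello é
    }

diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go
new file mode 100644
index 00000000..2197f8ac
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go
@@ -0,0 +1,130 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.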
+
+//go:generate go run makexml.go -output xml.go
+
+// Package cldr provides a parser for LDML and related XML formats.
+// This package is intended to be used by the table generation tools
+// for the various internationalization-related packages.
+// As the XML types are generated from the CLDR DTD, and as the CLDR standard
+// is periodically amended, this package may change considerably over time.
+// This mostly means that data may appear and disappear between versions.
+// That is, old code should keep compiling for newer versions, but data
+// may have moved or changed.
+// CLDR version 22 is the first version supported by this package.
+// Older versions may not work.
+package cldr // import "golang.org/x/text/unicode/cldr"
+
+import (
+	"fmt"
+	"sort"
+)
+
+// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
+type CLDR struct {
+	parent   map[string][]string
+	locale   map[string]*LDML
+	resolved map[string]*LDML
+	bcp47    *LDMLBCP47
+	supp     *SupplementalData
+}
+
+func makeCLDR() *CLDR {
+	return &CLDR{
+		parent:   make(map[string][]string),
+		locale:   make(map[string]*LDML),
+		resolved: make(map[string]*LDML),
+		bcp47:    &LDMLBCP47{},
+		supp:     &SupplementalData{},
+	}
+}
+
+// BCP47 returns the parsed BCP47 LDML data.
+func (cldr *CLDR) BCP47() *LDMLBCP47 {
+	return cldr.bcp47
+}
+
+// Draft indicates the draft level of an element.
+type Draft int
+
+const (
+	Approved Draft = iota
+	Contributed
+	Provisional
+	Unconfirmed
+)
+
+var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
+
+// ParseDraft returns the Draft value corresponding to the given string. The
+// empty string corresponds to Approved.
+func ParseDraft(level string) (Draft, error) {
+	if level == "" {
+		return Approved, nil
+	}
+	for i, s := range drafts {
+		if level == s {
+			return Unconfirmed - Draft(i), nil
+		}
+	}
+	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
+}
+
+func (d Draft) String() string {
+	return drafts[len(drafts)-1-int(d)]
+}
+
+// SetDraftLevel sets which draft levels to include in the evaluated LDML.
+// Any draft element for which the draft level is higher than lev will be excluded.
+// If multiple draft levels are available for a single element, the one with the
+// lowest draft level will be selected, unless preferDraft is true, in which case
+// the highest draft will be chosen.
+// It is assumed that the underlying LDML is canonicalized.
+func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
+	// TODO: implement the selection logic. For now, only invalidate the cache
+	// of resolved locales.
+	cldr.resolved = make(map[string]*LDML)
+}
+
+// RawLDML returns the LDML XML for loc in unresolved form.
+// loc must be one of the strings returned by Locales.
+func (cldr *CLDR) RawLDML(loc string) *LDML {
+	return cldr.locale[loc]
+}
+
+// LDML returns the fully resolved LDML XML for loc, which must be one of
+// the strings returned by Locales.
+func (cldr *CLDR) LDML(loc string) (*LDML, error) {
+	return cldr.resolve(loc)
+}
+
+// Supplemental returns the parsed supplemental data. If no such data was parsed,
+// nil is returned.
+func (cldr *CLDR) Supplemental() *SupplementalData {
+	return cldr.supp
+}
+
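A hedged sketch (not part of the vendored file) of how these accessors
combine; obtaining the *CLDR value is done via the package's decoder, which
lies outside this excerpt:

    // report resolves every locale for which a file exists and prints it.
    func report(data *cldr.CLDR) error {
        for _, loc := range data.Locales() {
            if _, err := data.LDML(loc); err != nil {
                return fmt.Errorf("resolving %s: %v", loc, err)
            }
            fmt.Println("resolved", loc)
        }
        return nil
    }

+// Locales returns the locales for which there exist files.
+// Valid sublocales for which there is no file are not included.
+// The root locale is always sorted first.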
+func (cldr *CLDR) Locales() []string {
+	loc := []string{"root"}
+	hasRoot := false
+	for l := range cldr.locale {
+		if l == "root" {
+			hasRoot = true
+			continue
+		}
+		loc = append(loc, l)
+	}
+	sort.Strings(loc[1:])
+	if !hasRoot {
+		return loc[1:]
+	}
+	return loc
+}
+
+// Get fills in the fields of x based on the XPath path.
+func Get(e Elem, path string) (res Elem, err error) {
+	return walkXPath(e, path)
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go
new file mode 100644
index 00000000..80ee28d7
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/collate.go
@@ -0,0 +1,359 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+	"bufio"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// RuleProcessor can be passed to Collation's Process method, which
+// parses the rules and calls the respective method for each rule found.
+type RuleProcessor interface {
+	Reset(anchor string, before int) error
+	Insert(level int, str, context, extend string) error
+	Index(id string)
+}
+
+const (
+	// cldrIndex is a Unicode-reserved sentinel value used to mark the start
+	// of a grouping within an index.
+	// We ignore any rule that starts with this rune.
+	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
+	cldrIndex = "\uFDD0"
+
+	// specialAnchor is the format in which to represent logical reset positions,
+	// such as "first tertiary ignorable".
+	specialAnchor = "<%s/>"
+)
+
+// Process parses the rules for the tailorings of this collation
+// and calls the respective methods of p for each rule found.
+func (c Collation) Process(p RuleProcessor) (err error) {
+	if len(c.Cr) > 0 {
+		if len(c.Cr) > 1 {
+			return fmt.Errorf("multiple cr elements, want 0 or 1")
+		}
+		return processRules(p, c.Cr[0].Data())
+	}
+	if c.Rules.Any != nil {
+		return c.processXML(p)
+	}
+	return errors.New("no tailoring data")
+}
+
+// processRules parses rules in the Collation Rule Syntax defined in
+// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
+func processRules(p RuleProcessor, s string) (err error) {
+	chk := func(s string, e error) string {
+		if err == nil {
+			err = e
+		}
+		return s
+	}
+	i := 0 // Save the line number for use after the loop.
+	scanner := bufio.NewScanner(strings.NewReader(s))
+	for ; scanner.Scan() && err == nil; i++ {
+		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
+			level := 5
+			var ch byte
+			switch ch, s = s[0], s[1:]; ch {
+			case '&': // followed by <anchor> or '[' <special-anchor> ']'
+				if s = skipSpace(s); consume(&s, '[') {
+					s = chk(parseSpecialAnchor(p, s))
+				} else {
+					s = chk(parseAnchor(p, 0, s))
+				}
+			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
+				for level = 1; consume(&s, '<'); level++ {
+				}
+				if level > 4 {
+					err = fmt.Errorf("level %d > 4", level)
+				}
+				fallthrough
+			case '=': // identity relation, optionally followed by *.
+				if consume(&s, '*') {
+					s = chk(parseSequence(p, level, s))
+				} else {
+					s = chk(parseOrder(p, level, s))
+				}
+			default:
+				chk("", fmt.Errorf("illegal operator %q", ch))
+				break
+			}
+		}
+	}
+	if chk("", scanner.Err()); err != nil {
+		return fmt.Errorf("%d: %v", i, err)
+	}
+	return nil
+}
+
+// parseSpecialAnchor parses the anchor syntax which is either of the form
+//	['before' <level>] <anchor>
+// or
+//	[