mirror of https://github.com/rancher/types.git

Update generated code

Darren Shepherd 2018-01-26 14:10:54 -07:00
parent 84558def93
commit 36927d4c64
21 changed files with 1534 additions and 0 deletions

View File

@ -0,0 +1,252 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
ClusterLoggingGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "ClusterLogging",
}
ClusterLoggingResource = metav1.APIResource{
Name: "clusterloggings",
SingularName: "clusterlogging",
Namespaced: true,
Kind: ClusterLoggingGroupVersionKind.Kind,
}
)
type ClusterLoggingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterLogging
}
type ClusterLoggingHandlerFunc func(key string, obj *ClusterLogging) error
type ClusterLoggingLister interface {
List(namespace string, selector labels.Selector) (ret []*ClusterLogging, err error)
Get(namespace, name string) (*ClusterLogging, error)
}
type ClusterLoggingController interface {
Informer() cache.SharedIndexInformer
Lister() ClusterLoggingLister
AddHandler(name string, handler ClusterLoggingHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler ClusterLoggingHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type ClusterLoggingInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*ClusterLogging) (*ClusterLogging, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ClusterLogging, error)
Get(name string, opts metav1.GetOptions) (*ClusterLogging, error)
Update(*ClusterLogging) (*ClusterLogging, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*ClusterLoggingList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() ClusterLoggingController
AddHandler(name string, sync ClusterLoggingHandlerFunc)
AddLifecycle(name string, lifecycle ClusterLoggingLifecycle)
AddClusterScopedHandler(name, clusterName string, sync ClusterLoggingHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle ClusterLoggingLifecycle)
}
type clusterLoggingLister struct {
controller *clusterLoggingController
}
func (l *clusterLoggingLister) List(namespace string, selector labels.Selector) (ret []*ClusterLogging, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*ClusterLogging))
})
return
}
func (l *clusterLoggingLister) Get(namespace, name string) (*ClusterLogging, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: ClusterLoggingGroupVersionKind.Group,
Resource: "clusterLogging",
}, name)
}
return obj.(*ClusterLogging), nil
}
type clusterLoggingController struct {
controller.GenericController
}
func (c *clusterLoggingController) Lister() ClusterLoggingLister {
return &clusterLoggingLister{
controller: c,
}
}
func (c *clusterLoggingController) AddHandler(name string, handler ClusterLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*ClusterLogging))
})
}
func (c *clusterLoggingController) AddClusterScopedHandler(name, cluster string, handler ClusterLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*ClusterLogging))
})
}
type clusterLoggingFactory struct {
}
func (c clusterLoggingFactory) Object() runtime.Object {
return &ClusterLogging{}
}
func (c clusterLoggingFactory) List() runtime.Object {
return &ClusterLoggingList{}
}
func (s *clusterLoggingClient) Controller() ClusterLoggingController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.clusterLoggingControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(ClusterLoggingGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &clusterLoggingController{
GenericController: genericController,
}
s.client.clusterLoggingControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type clusterLoggingClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller ClusterLoggingController
}
func (s *clusterLoggingClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *clusterLoggingClient) Create(o *ClusterLogging) (*ClusterLogging, error) {
obj, err := s.objectClient.Create(o)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Get(name string, opts metav1.GetOptions) (*ClusterLogging, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ClusterLogging, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Update(o *ClusterLogging) (*ClusterLogging, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *clusterLoggingClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *clusterLoggingClient) List(opts metav1.ListOptions) (*ClusterLoggingList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*ClusterLoggingList), err
}
func (s *clusterLoggingClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched ClusterLogging.
func (s *clusterLoggingClient) Patch(o *ClusterLogging, data []byte, subresources ...string) (*ClusterLogging, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*ClusterLogging), err
}
func (s *clusterLoggingClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *clusterLoggingClient) AddHandler(name string, sync ClusterLoggingHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *clusterLoggingClient) AddLifecycle(name string, lifecycle ClusterLoggingLifecycle) {
sync := NewClusterLoggingLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *clusterLoggingClient) AddClusterScopedHandler(name, clusterName string, sync ClusterLoggingHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *clusterLoggingClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle ClusterLoggingLifecycle) {
sync := NewClusterLoggingLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}
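
As a quick orientation, below is a minimal, hypothetical consumer of the interface above; the handler name is a placeholder and the sketch is assumed to sit in package v3 next to the generated code rather than being part of this commit.

package v3 // illustrative sketch, assumed to live alongside the generated code

// registerClusterLoggingHandler registers a plain sync handler. AddHandler
// goes through Controller(), which lazily builds the generic controller and
// appends it to the client's starters.
func registerClusterLoggingHandler(clusterLoggings ClusterLoggingInterface) {
	clusterLoggings.AddHandler("cluster-logging-sync", func(key string, obj *ClusterLogging) error {
		if obj == nil {
			// The key no longer exists in the store (the object was deleted).
			return nil
		}
		// React to changes in obj here.
		return nil
	})
}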

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type ClusterLoggingLifecycle interface {
Create(obj *ClusterLogging) (*ClusterLogging, error)
Remove(obj *ClusterLogging) (*ClusterLogging, error)
Updated(obj *ClusterLogging) (*ClusterLogging, error)
}
type clusterLoggingLifecycleAdapter struct {
lifecycle ClusterLoggingLifecycle
}
func (w *clusterLoggingLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *clusterLoggingLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *clusterLoggingLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*ClusterLogging))
if o == nil {
return nil, err
}
return o, err
}
func NewClusterLoggingLifecycleAdapter(name string, clusterScoped bool, client ClusterLoggingInterface, l ClusterLoggingLifecycle) ClusterLoggingHandlerFunc {
adapter := &clusterLoggingLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *ClusterLogging) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}
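
A hypothetical lifecycle implementation wired through the adapter above; the type and handler name are placeholders, and the sketch is assumed to sit in package v3 alongside the generated code.

package v3 // illustrative sketch, assumed to live alongside the generated code

// noopClusterLoggingLifecycle satisfies ClusterLoggingLifecycle without
// doing any real work; each hook returns the object unchanged.
type noopClusterLoggingLifecycle struct{}

func (noopClusterLoggingLifecycle) Create(obj *ClusterLogging) (*ClusterLogging, error)  { return obj, nil }
func (noopClusterLoggingLifecycle) Updated(obj *ClusterLogging) (*ClusterLogging, error) { return obj, nil }
func (noopClusterLoggingLifecycle) Remove(obj *ClusterLogging) (*ClusterLogging, error)  { return obj, nil }

// registerClusterLoggingLifecycle wraps the lifecycle with
// NewClusterLoggingLifecycleAdapter (via AddLifecycle) and registers the
// resulting handler on the controller.
func registerClusterLoggingLifecycle(client ClusterLoggingInterface) {
	client.AddLifecycle("cluster-logging-lifecycle", noopClusterLoggingLifecycle{})
}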

View File

@ -39,6 +39,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*BaseService).DeepCopyInto(out.(*BaseService))
return nil
}, InType: reflect.TypeOf(&BaseService{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*BrokerList).DeepCopyInto(out.(*BrokerList))
return nil
}, InType: reflect.TypeOf(&BrokerList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Catalog).DeepCopyInto(out.(*Catalog))
return nil
@ -83,6 +87,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ClusterList).DeepCopyInto(out.(*ClusterList))
return nil
}, InType: reflect.TypeOf(&ClusterList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLogging).DeepCopyInto(out.(*ClusterLogging))
return nil
}, InType: reflect.TypeOf(&ClusterLogging{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLoggingList).DeepCopyInto(out.(*ClusterLoggingList))
return nil
}, InType: reflect.TypeOf(&ClusterLoggingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterLoggingSpec).DeepCopyInto(out.(*ClusterLoggingSpec))
return nil
}, InType: reflect.TypeOf(&ClusterLoggingSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ClusterRegistrationToken).DeepCopyInto(out.(*ClusterRegistrationToken))
return nil
@ -139,6 +155,14 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ETCDService).DeepCopyInto(out.(*ETCDService))
return nil
}, InType: reflect.TypeOf(&ETCDService{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ElasticsearchConfig).DeepCopyInto(out.(*ElasticsearchConfig))
return nil
}, InType: reflect.TypeOf(&ElasticsearchConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*EmbeddedConfig).DeepCopyInto(out.(*EmbeddedConfig))
return nil
}, InType: reflect.TypeOf(&EmbeddedConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Field).DeepCopyInto(out.(*Field))
return nil
@ -199,6 +223,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*K8sServerConfig).DeepCopyInto(out.(*K8sServerConfig))
return nil
}, InType: reflect.TypeOf(&K8sServerConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*KafkaConfig).DeepCopyInto(out.(*KafkaConfig))
return nil
}, InType: reflect.TypeOf(&KafkaConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*KubeAPIService).DeepCopyInto(out.(*KubeAPIService))
return nil
@ -231,6 +259,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*LocalCredential).DeepCopyInto(out.(*LocalCredential))
return nil
}, InType: reflect.TypeOf(&LocalCredential{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingCommonSpec).DeepCopyInto(out.(*LoggingCommonSpec))
return nil
}, InType: reflect.TypeOf(&LoggingCommonSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingCondition).DeepCopyInto(out.(*LoggingCondition))
return nil
}, InType: reflect.TypeOf(&LoggingCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoggingStatus).DeepCopyInto(out.(*LoggingStatus))
return nil
}, InType: reflect.TypeOf(&LoggingStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*LoginInput).DeepCopyInto(out.(*LoginInput))
return nil
@ -343,6 +383,18 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*ProjectList).DeepCopyInto(out.(*ProjectList))
return nil
}, InType: reflect.TypeOf(&ProjectList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLogging).DeepCopyInto(out.(*ProjectLogging))
return nil
}, InType: reflect.TypeOf(&ProjectLogging{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLoggingList).DeepCopyInto(out.(*ProjectLoggingList))
return nil
}, InType: reflect.TypeOf(&ProjectLoggingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectLoggingSpec).DeepCopyInto(out.(*ProjectLoggingSpec))
return nil
}, InType: reflect.TypeOf(&ProjectLoggingSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ProjectRoleTemplateBinding).DeepCopyInto(out.(*ProjectRoleTemplateBinding))
return nil
@ -403,6 +455,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*SettingList).DeepCopyInto(out.(*SettingList))
return nil
}, InType: reflect.TypeOf(&SettingList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SplunkConfig).DeepCopyInto(out.(*SplunkConfig))
return nil
}, InType: reflect.TypeOf(&SplunkConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Stack).DeepCopyInto(out.(*Stack))
return nil
@ -419,6 +475,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
return nil
}, InType: reflect.TypeOf(&StackStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SyslogConfig).DeepCopyInto(out.(*SyslogConfig))
return nil
}, InType: reflect.TypeOf(&SyslogConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Template).DeepCopyInto(out.(*Template))
return nil
@ -471,6 +531,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*Values).DeepCopyInto(out.(*Values))
return nil
}, InType: reflect.TypeOf(&Values{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Zookeeper).DeepCopyInto(out.(*Zookeeper))
return nil
}, InType: reflect.TypeOf(&Zookeeper{})},
)
}
@ -575,6 +639,27 @@ func (in *BaseService) DeepCopy() *BaseService {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BrokerList) DeepCopyInto(out *BrokerList) {
*out = *in
if in.BrokerList != nil {
in, out := &in.BrokerList, &out.BrokerList
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerList.
func (in *BrokerList) DeepCopy() *BrokerList {
if in == nil {
return nil
}
out := new(BrokerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Catalog) DeepCopyInto(out *Catalog) {
*out = *in
@ -847,6 +932,96 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLogging) DeepCopyInto(out *ClusterLogging) {
*out = *in
out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLogging.
func (in *ClusterLogging) DeepCopy() *ClusterLogging {
if in == nil {
return nil
}
out := new(ClusterLogging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterLogging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLoggingList) DeepCopyInto(out *ClusterLoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterLogging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingList.
func (in *ClusterLoggingList) DeepCopy() *ClusterLoggingList {
if in == nil {
return nil
}
out := new(ClusterLoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterLoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLoggingSpec) DeepCopyInto(out *ClusterLoggingSpec) {
*out = *in
in.LoggingCommonSpec.DeepCopyInto(&out.LoggingCommonSpec)
if in.EmbeddedConfig != nil {
in, out := &in.EmbeddedConfig, &out.EmbeddedConfig
if *in == nil {
*out = nil
} else {
*out = new(EmbeddedConfig)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLoggingSpec.
func (in *ClusterLoggingSpec) DeepCopy() *ClusterLoggingSpec {
if in == nil {
return nil
}
out := new(ClusterLoggingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRegistrationToken) DeepCopyInto(out *ClusterRegistrationToken) {
*out = *in
@ -1308,6 +1483,38 @@ func (in *ETCDService) DeepCopy() *ETCDService {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchConfig) DeepCopyInto(out *ElasticsearchConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfig.
func (in *ElasticsearchConfig) DeepCopy() *ElasticsearchConfig {
if in == nil {
return nil
}
out := new(ElasticsearchConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedConfig) DeepCopyInto(out *EmbeddedConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedConfig.
func (in *EmbeddedConfig) DeepCopy() *EmbeddedConfig {
if in == nil {
return nil
}
out := new(EmbeddedConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Field) DeepCopyInto(out *Field) {
*out = *in
@ -1699,6 +1906,40 @@ func (in *K8sServerConfig) DeepCopy() *K8sServerConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaConfig) DeepCopyInto(out *KafkaConfig) {
*out = *in
if in.Zookeeper != nil {
in, out := &in.Zookeeper, &out.Zookeeper
if *in == nil {
*out = nil
} else {
*out = new(Zookeeper)
**out = **in
}
}
if in.Broker != nil {
in, out := &in.Broker, &out.Broker
if *in == nil {
*out = nil
} else {
*out = new(BrokerList)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConfig.
func (in *KafkaConfig) DeepCopy() *KafkaConfig {
if in == nil {
return nil
}
out := new(KafkaConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeAPIService) DeepCopyInto(out *KubeAPIService) {
*out = *in
@ -1887,6 +2128,102 @@ func (in *LocalCredential) DeepCopy() *LocalCredential {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingCommonSpec) DeepCopyInto(out *LoggingCommonSpec) {
*out = *in
if in.OutputTags != nil {
in, out := &in.OutputTags, &out.OutputTags
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ElasticsearchConfig != nil {
in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig
if *in == nil {
*out = nil
} else {
*out = new(ElasticsearchConfig)
**out = **in
}
}
if in.SplunkConfig != nil {
in, out := &in.SplunkConfig, &out.SplunkConfig
if *in == nil {
*out = nil
} else {
*out = new(SplunkConfig)
**out = **in
}
}
if in.KafkaConfig != nil {
in, out := &in.KafkaConfig, &out.KafkaConfig
if *in == nil {
*out = nil
} else {
*out = new(KafkaConfig)
(*in).DeepCopyInto(*out)
}
}
if in.SyslogConfig != nil {
in, out := &in.SyslogConfig, &out.SyslogConfig
if *in == nil {
*out = nil
} else {
*out = new(SyslogConfig)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingCommonSpec.
func (in *LoggingCommonSpec) DeepCopy() *LoggingCommonSpec {
if in == nil {
return nil
}
out := new(LoggingCommonSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingCondition) DeepCopyInto(out *LoggingCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingCondition.
func (in *LoggingCondition) DeepCopy() *LoggingCondition {
if in == nil {
return nil
}
out := new(LoggingCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingStatus) DeepCopyInto(out *LoggingStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]LoggingCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingStatus.
func (in *LoggingStatus) DeepCopy() *LoggingStatus {
if in == nil {
return nil
}
out := new(LoggingStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoginInput) DeepCopyInto(out *LoginInput) {
*out = *in
@ -2697,6 +3034,87 @@ func (in *ProjectList) DeepCopyObject() runtime.Object {
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLogging) DeepCopyInto(out *ProjectLogging) {
*out = *in
out.Namespaced = in.Namespaced
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLogging.
func (in *ProjectLogging) DeepCopy() *ProjectLogging {
if in == nil {
return nil
}
out := new(ProjectLogging)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectLogging) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLoggingList) DeepCopyInto(out *ProjectLoggingList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ProjectLogging, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLoggingList.
func (in *ProjectLoggingList) DeepCopy() *ProjectLoggingList {
if in == nil {
return nil
}
out := new(ProjectLoggingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectLoggingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectLoggingSpec) DeepCopyInto(out *ProjectLoggingSpec) {
*out = *in
in.LoggingCommonSpec.DeepCopyInto(&out.LoggingCommonSpec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectLoggingSpec.
func (in *ProjectLoggingSpec) DeepCopy() *ProjectLoggingSpec {
if in == nil {
return nil
}
out := new(ProjectLoggingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectRoleTemplateBinding) DeepCopyInto(out *ProjectRoleTemplateBinding) {
*out = *in
@ -3077,6 +3495,22 @@ func (in *SettingList) DeepCopyObject() runtime.Object {
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SplunkConfig) DeepCopyInto(out *SplunkConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfig.
func (in *SplunkConfig) DeepCopy() *SplunkConfig {
if in == nil {
return nil
}
out := new(SplunkConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Stack) DeepCopyInto(out *Stack) {
*out = *in
@ -3204,6 +3638,22 @@ func (in *StackStatus) DeepCopy() *StackStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyslogConfig) DeepCopyInto(out *SyslogConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogConfig.
func (in *SyslogConfig) DeepCopy() *SyslogConfig {
if in == nil {
return nil
}
out := new(SyslogConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Template) DeepCopyInto(out *Template) {
*out = *in
@ -3596,3 +4046,19 @@ func (in *Values) DeepCopy() *Values {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Zookeeper) DeepCopyInto(out *Zookeeper) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zookeeper.
func (in *Zookeeper) DeepCopy() *Zookeeper {
if in == nil {
return nil
}
out := new(Zookeeper)
in.DeepCopyInto(out)
return out
}
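
The generated helpers return fully independent copies, re-allocating maps and pointer-valued config blocks rather than sharing them; a small sketch (hypothetical function name, assumed to sit in package v3):

package v3 // illustrative sketch, assumed to live alongside the generated code

// cloneSpec copies a spec and mutates only the clone; because DeepCopy
// re-allocates maps and pointer fields, the original is left untouched.
func cloneSpec(in *ClusterLoggingSpec) *ClusterLoggingSpec {
	out := in.DeepCopy() // nil-safe: returns nil when in is nil
	if out != nil && out.LoggingCommonSpec.OutputTags != nil {
		out.LoggingCommonSpec.OutputTags["copied"] = "true"
	}
	return out
}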

View File

@ -38,6 +38,8 @@ type Interface interface {
DynamicSchemasGetter
StacksGetter
PreferencesGetter
ClusterLoggingsGetter
ProjectLoggingsGetter
ListenConfigsGetter
SettingsGetter
}
@ -71,6 +73,8 @@ type Client struct {
dynamicSchemaControllers map[string]DynamicSchemaController
stackControllers map[string]StackController
preferenceControllers map[string]PreferenceController
clusterLoggingControllers map[string]ClusterLoggingController
projectLoggingControllers map[string]ProjectLoggingController
listenConfigControllers map[string]ListenConfigController
settingControllers map[string]SettingController
}
@ -113,6 +117,8 @@ func NewForConfig(config rest.Config) (Interface, error) {
dynamicSchemaControllers: map[string]DynamicSchemaController{},
stackControllers: map[string]StackController{},
preferenceControllers: map[string]PreferenceController{},
clusterLoggingControllers: map[string]ClusterLoggingController{},
projectLoggingControllers: map[string]ProjectLoggingController{},
listenConfigControllers: map[string]ListenConfigController{},
settingControllers: map[string]SettingController{},
}, nil
@ -442,6 +448,32 @@ func (c *Client) Preferences(namespace string) PreferenceInterface {
}
}
type ClusterLoggingsGetter interface {
ClusterLoggings(namespace string) ClusterLoggingInterface
}
func (c *Client) ClusterLoggings(namespace string) ClusterLoggingInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &ClusterLoggingResource, ClusterLoggingGroupVersionKind, clusterLoggingFactory{})
return &clusterLoggingClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
type ProjectLoggingsGetter interface {
ProjectLoggings(namespace string) ProjectLoggingInterface
}
func (c *Client) ProjectLoggings(namespace string) ProjectLoggingInterface {
objectClient := clientbase.NewObjectClient(namespace, c.restClient, &ProjectLoggingResource, ProjectLoggingGroupVersionKind, projectLoggingFactory{})
return &projectLoggingClient{
ns: namespace,
client: c,
objectClient: objectClient,
}
}
type ListenConfigsGetter interface {
ListenConfigs(namespace string) ListenConfigInterface
}
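
A hypothetical bootstrap showing how the two new getters are reached from a rest.Config; the empty namespace argument is only a placeholder for whatever scope a caller actually wants.

package v3 // illustrative sketch, assumed to live alongside the generated code

import "k8s.io/client-go/rest"

// newLoggingClients builds the management client and pulls out the two
// typed interfaces added in this commit.
func newLoggingClients(config rest.Config) (ClusterLoggingInterface, ProjectLoggingInterface, error) {
	mgmt, err := NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	return mgmt.ClusterLoggings(""), mgmt.ProjectLoggings(""), nil
}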

View File

@ -0,0 +1,252 @@
package v3
import (
"context"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
var (
ProjectLoggingGroupVersionKind = schema.GroupVersionKind{
Version: Version,
Group: GroupName,
Kind: "ProjectLogging",
}
ProjectLoggingResource = metav1.APIResource{
Name: "projectloggings",
SingularName: "projectlogging",
Namespaced: true,
Kind: ProjectLoggingGroupVersionKind.Kind,
}
)
type ProjectLoggingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ProjectLogging
}
type ProjectLoggingHandlerFunc func(key string, obj *ProjectLogging) error
type ProjectLoggingLister interface {
List(namespace string, selector labels.Selector) (ret []*ProjectLogging, err error)
Get(namespace, name string) (*ProjectLogging, error)
}
type ProjectLoggingController interface {
Informer() cache.SharedIndexInformer
Lister() ProjectLoggingLister
AddHandler(name string, handler ProjectLoggingHandlerFunc)
AddClusterScopedHandler(name, clusterName string, handler ProjectLoggingHandlerFunc)
Enqueue(namespace, name string)
Sync(ctx context.Context) error
Start(ctx context.Context, threadiness int) error
}
type ProjectLoggingInterface interface {
ObjectClient() *clientbase.ObjectClient
Create(*ProjectLogging) (*ProjectLogging, error)
GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ProjectLogging, error)
Get(name string, opts metav1.GetOptions) (*ProjectLogging, error)
Update(*ProjectLogging) (*ProjectLogging, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error
List(opts metav1.ListOptions) (*ProjectLoggingList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error
Controller() ProjectLoggingController
AddHandler(name string, sync ProjectLoggingHandlerFunc)
AddLifecycle(name string, lifecycle ProjectLoggingLifecycle)
AddClusterScopedHandler(name, clusterName string, sync ProjectLoggingHandlerFunc)
AddClusterScopedLifecycle(name, clusterName string, lifecycle ProjectLoggingLifecycle)
}
type projectLoggingLister struct {
controller *projectLoggingController
}
func (l *projectLoggingLister) List(namespace string, selector labels.Selector) (ret []*ProjectLogging, err error) {
err = cache.ListAllByNamespace(l.controller.Informer().GetIndexer(), namespace, selector, func(obj interface{}) {
ret = append(ret, obj.(*ProjectLogging))
})
return
}
func (l *projectLoggingLister) Get(namespace, name string) (*ProjectLogging, error) {
var key string
if namespace != "" {
key = namespace + "/" + name
} else {
key = name
}
obj, exists, err := l.controller.Informer().GetIndexer().GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(schema.GroupResource{
Group: ProjectLoggingGroupVersionKind.Group,
Resource: "projectLogging",
}, name)
}
return obj.(*ProjectLogging), nil
}
type projectLoggingController struct {
controller.GenericController
}
func (c *projectLoggingController) Lister() ProjectLoggingLister {
return &projectLoggingLister{
controller: c,
}
}
func (c *projectLoggingController) AddHandler(name string, handler ProjectLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
return handler(key, obj.(*ProjectLogging))
})
}
func (c *projectLoggingController) AddClusterScopedHandler(name, cluster string, handler ProjectLoggingHandlerFunc) {
c.GenericController.AddHandler(name, func(key string) error {
obj, exists, err := c.Informer().GetStore().GetByKey(key)
if err != nil {
return err
}
if !exists {
return handler(key, nil)
}
if !controller.ObjectInCluster(cluster, obj) {
return nil
}
return handler(key, obj.(*ProjectLogging))
})
}
type projectLoggingFactory struct {
}
func (c projectLoggingFactory) Object() runtime.Object {
return &ProjectLogging{}
}
func (c projectLoggingFactory) List() runtime.Object {
return &ProjectLoggingList{}
}
func (s *projectLoggingClient) Controller() ProjectLoggingController {
s.client.Lock()
defer s.client.Unlock()
c, ok := s.client.projectLoggingControllers[s.ns]
if ok {
return c
}
genericController := controller.NewGenericController(ProjectLoggingGroupVersionKind.Kind+"Controller",
s.objectClient)
c = &projectLoggingController{
GenericController: genericController,
}
s.client.projectLoggingControllers[s.ns] = c
s.client.starters = append(s.client.starters, c)
return c
}
type projectLoggingClient struct {
client *Client
ns string
objectClient *clientbase.ObjectClient
controller ProjectLoggingController
}
func (s *projectLoggingClient) ObjectClient() *clientbase.ObjectClient {
return s.objectClient
}
func (s *projectLoggingClient) Create(o *ProjectLogging) (*ProjectLogging, error) {
obj, err := s.objectClient.Create(o)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Get(name string, opts metav1.GetOptions) (*ProjectLogging, error) {
obj, err := s.objectClient.Get(name, opts)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) GetNamespaced(namespace, name string, opts metav1.GetOptions) (*ProjectLogging, error) {
obj, err := s.objectClient.GetNamespaced(namespace, name, opts)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Update(o *ProjectLogging) (*ProjectLogging, error) {
obj, err := s.objectClient.Update(o.Name, o)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) Delete(name string, options *metav1.DeleteOptions) error {
return s.objectClient.Delete(name, options)
}
func (s *projectLoggingClient) DeleteNamespaced(namespace, name string, options *metav1.DeleteOptions) error {
return s.objectClient.DeleteNamespaced(namespace, name, options)
}
func (s *projectLoggingClient) List(opts metav1.ListOptions) (*ProjectLoggingList, error) {
obj, err := s.objectClient.List(opts)
return obj.(*ProjectLoggingList), err
}
func (s *projectLoggingClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return s.objectClient.Watch(opts)
}
// Patch applies the patch and returns the patched ProjectLogging.
func (s *projectLoggingClient) Patch(o *ProjectLogging, data []byte, subresources ...string) (*ProjectLogging, error) {
obj, err := s.objectClient.Patch(o.Name, o, data, subresources...)
return obj.(*ProjectLogging), err
}
func (s *projectLoggingClient) DeleteCollection(deleteOpts *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return s.objectClient.DeleteCollection(deleteOpts, listOpts)
}
func (s *projectLoggingClient) AddHandler(name string, sync ProjectLoggingHandlerFunc) {
s.Controller().AddHandler(name, sync)
}
func (s *projectLoggingClient) AddLifecycle(name string, lifecycle ProjectLoggingLifecycle) {
sync := NewProjectLoggingLifecycleAdapter(name, false, s, lifecycle)
s.AddHandler(name, sync)
}
func (s *projectLoggingClient) AddClusterScopedHandler(name, clusterName string, sync ProjectLoggingHandlerFunc) {
s.Controller().AddClusterScopedHandler(name, clusterName, sync)
}
func (s *projectLoggingClient) AddClusterScopedLifecycle(name, clusterName string, lifecycle ProjectLoggingLifecycle) {
sync := NewProjectLoggingLifecycleAdapter(name+"_"+clusterName, true, s, lifecycle)
s.AddClusterScopedHandler(name, clusterName, sync)
}
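
For the read path, a sketch of going through the cached lister instead of the API server; the helper name is hypothetical and the sketch assumes the controller has already been started and its cache has synced.

package v3 // illustrative sketch, assumed to live alongside the generated code

import "k8s.io/apimachinery/pkg/labels"

// listCachedProjectLoggings reads from the controller's informer cache via
// the generated lister.
func listCachedProjectLoggings(client ProjectLoggingInterface, namespace string) ([]*ProjectLogging, error) {
	return client.Controller().Lister().List(namespace, labels.Everything())
}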

View File

@ -0,0 +1,51 @@
package v3
import (
"github.com/rancher/norman/lifecycle"
"k8s.io/apimachinery/pkg/runtime"
)
type ProjectLoggingLifecycle interface {
Create(obj *ProjectLogging) (*ProjectLogging, error)
Remove(obj *ProjectLogging) (*ProjectLogging, error)
Updated(obj *ProjectLogging) (*ProjectLogging, error)
}
type projectLoggingLifecycleAdapter struct {
lifecycle ProjectLoggingLifecycle
}
func (w *projectLoggingLifecycleAdapter) Create(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Create(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *projectLoggingLifecycleAdapter) Finalize(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Remove(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func (w *projectLoggingLifecycleAdapter) Updated(obj runtime.Object) (runtime.Object, error) {
o, err := w.lifecycle.Updated(obj.(*ProjectLogging))
if o == nil {
return nil, err
}
return o, err
}
func NewProjectLoggingLifecycleAdapter(name string, clusterScoped bool, client ProjectLoggingInterface, l ProjectLoggingLifecycle) ProjectLoggingHandlerFunc {
adapter := &projectLoggingLifecycleAdapter{lifecycle: l}
syncFn := lifecycle.NewObjectLifecycleAdapter(name, clusterScoped, adapter, client.ObjectClient())
return func(key string, obj *ProjectLogging) error {
if obj == nil {
return syncFn(key, nil)
}
return syncFn(key, obj)
}
}

View File

@ -81,6 +81,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&StackList{},
&Preference{},
&PreferenceList{},
&ClusterLogging{},
&ClusterLoggingList{},
&ProjectLogging{},
&ProjectLoggingList{},
&ListenConfig{},
&ListenConfigList{},
&Setting{},

View File

@ -0,0 +1,10 @@
package client
const (
BrokerListType = "brokerList"
BrokerListFieldBrokerList = "brokerList"
)
type BrokerList struct {
BrokerList []string `json:"brokerList,omitempty"`
}

View File

@ -32,6 +32,8 @@ type Client struct {
DynamicSchema DynamicSchemaOperations
Stack StackOperations
Preference PreferenceOperations
ClusterLogging ClusterLoggingOperations
ProjectLogging ProjectLoggingOperations
ListenConfig ListenConfigOperations
Setting SettingOperations
}
@ -71,6 +73,8 @@ func NewClient(opts *clientbase.ClientOpts) (*Client, error) {
client.DynamicSchema = newDynamicSchemaClient(client)
client.Stack = newStackClient(client)
client.Preference = newPreferenceClient(client)
client.ClusterLogging = newClusterLoggingClient(client)
client.ProjectLogging = newProjectLoggingClient(client)
client.ListenConfig = newListenConfigClient(client)
client.Setting = newSettingClient(client)
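
On the Norman (public API) side, a hypothetical consumer of the client wired up above; the helper name is made up and error handling is kept minimal.

package client // illustrative sketch, assumed to live alongside the generated client

import (
	"github.com/rancher/norman/clientbase"
	"github.com/rancher/norman/types"
)

// listClusterLoggings builds the API client once and lists cluster logging
// resources through the typed operations field wired up in NewClient.
func listClusterLoggings(opts *clientbase.ClientOpts) ([]ClusterLogging, error) {
	c, err := NewClient(opts)
	if err != nil {
		return nil, err
	}
	collection, err := c.ClusterLogging.List(&types.ListOpts{})
	if err != nil {
		return nil, err
	}
	return collection.Data, nil
}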

View File

@ -0,0 +1,119 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
ClusterLoggingType = "clusterLogging"
ClusterLoggingFieldAnnotations = "annotations"
ClusterLoggingFieldClusterId = "clusterId"
ClusterLoggingFieldCreated = "created"
ClusterLoggingFieldCreatorID = "creatorId"
ClusterLoggingFieldDisplayName = "displayName"
ClusterLoggingFieldElasticsearchConfig = "elasticsearchConfig"
ClusterLoggingFieldEmbeddedConfig = "embeddedConfig"
ClusterLoggingFieldKafkaConfig = "kafkaConfig"
ClusterLoggingFieldLabels = "labels"
ClusterLoggingFieldName = "name"
ClusterLoggingFieldNamespaceId = "namespaceId"
ClusterLoggingFieldOutputFlushInterval = "outputFlushInterval"
ClusterLoggingFieldOutputTags = "outputTags"
ClusterLoggingFieldOwnerReferences = "ownerReferences"
ClusterLoggingFieldRemoved = "removed"
ClusterLoggingFieldSplunkConfig = "splunkConfig"
ClusterLoggingFieldState = "state"
ClusterLoggingFieldStatus = "status"
ClusterLoggingFieldSyslogConfig = "syslogConfig"
ClusterLoggingFieldTransitioning = "transitioning"
ClusterLoggingFieldTransitioningMessage = "transitioningMessage"
ClusterLoggingFieldUuid = "uuid"
)
type ClusterLogging struct {
types.Resource
Annotations map[string]string `json:"annotations,omitempty"`
ClusterId string `json:"clusterId,omitempty"`
Created string `json:"created,omitempty"`
CreatorID string `json:"creatorId,omitempty"`
DisplayName string `json:"displayName,omitempty"`
ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
EmbeddedConfig *EmbeddedConfig `json:"embeddedConfig,omitempty"`
KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
NamespaceId string `json:"namespaceId,omitempty"`
OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
OutputTags map[string]string `json:"outputTags,omitempty"`
OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"`
Removed string `json:"removed,omitempty"`
SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
State string `json:"state,omitempty"`
Status *LoggingStatus `json:"status,omitempty"`
SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
Transitioning string `json:"transitioning,omitempty"`
TransitioningMessage string `json:"transitioningMessage,omitempty"`
Uuid string `json:"uuid,omitempty"`
}
type ClusterLoggingCollection struct {
types.Collection
Data []ClusterLogging `json:"data,omitempty"`
client *ClusterLoggingClient
}
type ClusterLoggingClient struct {
apiClient *Client
}
type ClusterLoggingOperations interface {
List(opts *types.ListOpts) (*ClusterLoggingCollection, error)
Create(opts *ClusterLogging) (*ClusterLogging, error)
Update(existing *ClusterLogging, updates interface{}) (*ClusterLogging, error)
ByID(id string) (*ClusterLogging, error)
Delete(container *ClusterLogging) error
}
func newClusterLoggingClient(apiClient *Client) *ClusterLoggingClient {
return &ClusterLoggingClient{
apiClient: apiClient,
}
}
func (c *ClusterLoggingClient) Create(container *ClusterLogging) (*ClusterLogging, error) {
resp := &ClusterLogging{}
err := c.apiClient.Ops.DoCreate(ClusterLoggingType, container, resp)
return resp, err
}
func (c *ClusterLoggingClient) Update(existing *ClusterLogging, updates interface{}) (*ClusterLogging, error) {
resp := &ClusterLogging{}
err := c.apiClient.Ops.DoUpdate(ClusterLoggingType, &existing.Resource, updates, resp)
return resp, err
}
func (c *ClusterLoggingClient) List(opts *types.ListOpts) (*ClusterLoggingCollection, error) {
resp := &ClusterLoggingCollection{}
err := c.apiClient.Ops.DoList(ClusterLoggingType, opts, resp)
resp.client = c
return resp, err
}
func (cc *ClusterLoggingCollection) Next() (*ClusterLoggingCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &ClusterLoggingCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *ClusterLoggingClient) ByID(id string) (*ClusterLogging, error) {
resp := &ClusterLogging{}
err := c.apiClient.Ops.DoByID(ClusterLoggingType, id, resp)
return resp, err
}
func (c *ClusterLoggingClient) Delete(container *ClusterLogging) error {
return c.apiClient.Ops.DoResourceDelete(ClusterLoggingType, &container.Resource)
}
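
A sketch of creating one of these resources through the operations interface above; all field values are placeholders.

package client // illustrative sketch, assumed to live alongside the generated client

// createClusterLogging creates a ClusterLogging pointing at an Elasticsearch
// endpoint; the name, host, and index prefix are placeholders.
func createClusterLogging(ops ClusterLoggingOperations, clusterID string) (*ClusterLogging, error) {
	return ops.Create(&ClusterLogging{
		Name:      "cluster-logging",
		ClusterId: clusterID,
		ElasticsearchConfig: &ElasticsearchConfig{
			Host:        "es.example.com",
			IndexPrefix: "rancher",
		},
	})
}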

View File

@ -0,0 +1,26 @@
package client
const (
ClusterLoggingSpecType = "clusterLoggingSpec"
ClusterLoggingSpecFieldClusterId = "clusterId"
ClusterLoggingSpecFieldDisplayName = "displayName"
ClusterLoggingSpecFieldElasticsearchConfig = "elasticsearchConfig"
ClusterLoggingSpecFieldEmbeddedConfig = "embeddedConfig"
ClusterLoggingSpecFieldKafkaConfig = "kafkaConfig"
ClusterLoggingSpecFieldOutputFlushInterval = "outputFlushInterval"
ClusterLoggingSpecFieldOutputTags = "outputTags"
ClusterLoggingSpecFieldSplunkConfig = "splunkConfig"
ClusterLoggingSpecFieldSyslogConfig = "syslogConfig"
)
type ClusterLoggingSpec struct {
ClusterId string `json:"clusterId,omitempty"`
DisplayName string `json:"displayName,omitempty"`
ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
EmbeddedConfig *EmbeddedConfig `json:"embeddedConfig,omitempty"`
KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
OutputTags map[string]string `json:"outputTags,omitempty"`
SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
}

View File

@ -0,0 +1,20 @@
package client
const (
ElasticsearchConfigType = "elasticsearchConfig"
ElasticsearchConfigFieldAuthPassword = "authPassword"
ElasticsearchConfigFieldAuthUserName = "authUsername"
ElasticsearchConfigFieldDateFormat = "dateFormat"
ElasticsearchConfigFieldHost = "host"
ElasticsearchConfigFieldIndexPrefix = "indexPrefix"
ElasticsearchConfigFieldPort = "port"
)
type ElasticsearchConfig struct {
AuthPassword string `json:"authPassword,omitempty"`
AuthUserName string `json:"authUsername,omitempty"`
DateFormat string `json:"dateFormat,omitempty"`
Host string `json:"host,omitempty"`
IndexPrefix string `json:"indexPrefix,omitempty"`
Port *int64 `json:"port,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
EmbeddedConfigType = "embeddedConfig"
EmbeddedConfigFieldDateFormat = "dateFormat"
EmbeddedConfigFieldIndexPrefix = "indexPrefix"
)
type EmbeddedConfig struct {
DateFormat string `json:"dateFormat,omitempty"`
IndexPrefix string `json:"indexPrefix,omitempty"`
}

View File

@ -0,0 +1,18 @@
package client
const (
KafkaConfigType = "kafkaConfig"
KafkaConfigFieldBroker = "broker"
KafkaConfigFieldDataType = "dataType"
KafkaConfigFieldMaxSendRetries = "maxSendRetries"
KafkaConfigFieldTopic = "topic"
KafkaConfigFieldZookeeper = "zookeeper"
)
type KafkaConfig struct {
Broker *BrokerList `json:"broker,omitempty"`
DataType string `json:"dataType,omitempty"`
MaxSendRetries *int64 `json:"maxSendRetries,omitempty"`
Topic string `json:"topic,omitempty"`
Zookeeper *Zookeeper `json:"zookeeper,omitempty"`
}
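
The Kafka settings compose out of the smaller helper types added in this commit (BrokerList and Zookeeper, defined elsewhere in the package); a sketch with placeholder values:

package client // illustrative sketch, assumed to live alongside the generated client

// newKafkaConfig assembles a KafkaConfig from its optional, pointer-typed
// parts; the topic and data type values are placeholders.
func newKafkaConfig(brokers []string, zkHost string, zkPort int64) *KafkaConfig {
	return &KafkaConfig{
		Topic:     "rancher-logs",
		DataType:  "json",
		Broker:    &BrokerList{BrokerList: brokers},
		Zookeeper: &Zookeeper{Host: zkHost, Port: &zkPort},
	}
}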

View File

@ -0,0 +1,20 @@
package client
const (
LoggingConditionType = "loggingCondition"
LoggingConditionFieldLastTransitionTime = "lastTransitionTime"
LoggingConditionFieldLastUpdateTime = "lastUpdateTime"
LoggingConditionFieldMessage = "message"
LoggingConditionFieldReason = "reason"
LoggingConditionFieldStatus = "status"
LoggingConditionFieldType = "type"
)
type LoggingCondition struct {
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
LastUpdateTime string `json:"lastUpdateTime,omitempty"`
Message string `json:"message,omitempty"`
Reason string `json:"reason,omitempty"`
Status string `json:"status,omitempty"`
Type string `json:"type,omitempty"`
}

View File

@ -0,0 +1,10 @@
package client
const (
LoggingStatusType = "loggingStatus"
LoggingStatusFieldConditions = "conditions"
)
type LoggingStatus struct {
Conditions []LoggingCondition `json:"conditions,omitempty"`
}

View File

@ -0,0 +1,117 @@
package client
import (
"github.com/rancher/norman/types"
)
const (
ProjectLoggingType = "projectLogging"
ProjectLoggingFieldAnnotations = "annotations"
ProjectLoggingFieldCreated = "created"
ProjectLoggingFieldCreatorID = "creatorId"
ProjectLoggingFieldDisplayName = "displayName"
ProjectLoggingFieldElasticsearchConfig = "elasticsearchConfig"
ProjectLoggingFieldKafkaConfig = "kafkaConfig"
ProjectLoggingFieldLabels = "labels"
ProjectLoggingFieldName = "name"
ProjectLoggingFieldNamespaceId = "namespaceId"
ProjectLoggingFieldOutputFlushInterval = "outputFlushInterval"
ProjectLoggingFieldOutputTags = "outputTags"
ProjectLoggingFieldOwnerReferences = "ownerReferences"
ProjectLoggingFieldProjectId = "projectId"
ProjectLoggingFieldRemoved = "removed"
ProjectLoggingFieldSplunkConfig = "splunkConfig"
ProjectLoggingFieldState = "state"
ProjectLoggingFieldStatus = "status"
ProjectLoggingFieldSyslogConfig = "syslogConfig"
ProjectLoggingFieldTransitioning = "transitioning"
ProjectLoggingFieldTransitioningMessage = "transitioningMessage"
ProjectLoggingFieldUuid = "uuid"
)
type ProjectLogging struct {
types.Resource
Annotations map[string]string `json:"annotations,omitempty"`
Created string `json:"created,omitempty"`
CreatorID string `json:"creatorId,omitempty"`
DisplayName string `json:"displayName,omitempty"`
ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
NamespaceId string `json:"namespaceId,omitempty"`
OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
OutputTags map[string]string `json:"outputTags,omitempty"`
OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"`
ProjectId string `json:"projectId,omitempty"`
Removed string `json:"removed,omitempty"`
SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
State string `json:"state,omitempty"`
Status *LoggingStatus `json:"status,omitempty"`
SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
Transitioning string `json:"transitioning,omitempty"`
TransitioningMessage string `json:"transitioningMessage,omitempty"`
Uuid string `json:"uuid,omitempty"`
}
type ProjectLoggingCollection struct {
types.Collection
Data []ProjectLogging `json:"data,omitempty"`
client *ProjectLoggingClient
}
type ProjectLoggingClient struct {
apiClient *Client
}
type ProjectLoggingOperations interface {
List(opts *types.ListOpts) (*ProjectLoggingCollection, error)
Create(opts *ProjectLogging) (*ProjectLogging, error)
Update(existing *ProjectLogging, updates interface{}) (*ProjectLogging, error)
ByID(id string) (*ProjectLogging, error)
Delete(container *ProjectLogging) error
}
func newProjectLoggingClient(apiClient *Client) *ProjectLoggingClient {
return &ProjectLoggingClient{
apiClient: apiClient,
}
}
func (c *ProjectLoggingClient) Create(container *ProjectLogging) (*ProjectLogging, error) {
resp := &ProjectLogging{}
err := c.apiClient.Ops.DoCreate(ProjectLoggingType, container, resp)
return resp, err
}
func (c *ProjectLoggingClient) Update(existing *ProjectLogging, updates interface{}) (*ProjectLogging, error) {
resp := &ProjectLogging{}
err := c.apiClient.Ops.DoUpdate(ProjectLoggingType, &existing.Resource, updates, resp)
return resp, err
}
func (c *ProjectLoggingClient) List(opts *types.ListOpts) (*ProjectLoggingCollection, error) {
resp := &ProjectLoggingCollection{}
err := c.apiClient.Ops.DoList(ProjectLoggingType, opts, resp)
resp.client = c
return resp, err
}
func (cc *ProjectLoggingCollection) Next() (*ProjectLoggingCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &ProjectLoggingCollection{}
err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}
func (c *ProjectLoggingClient) ByID(id string) (*ProjectLogging, error) {
resp := &ProjectLogging{}
err := c.apiClient.Ops.DoByID(ProjectLoggingType, id, resp)
return resp, err
}
func (c *ProjectLoggingClient) Delete(container *ProjectLogging) error {
return c.apiClient.Ops.DoResourceDelete(ProjectLoggingType, &container.Resource)
}
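
The generated collections paginate through Next; a sketch of draining every page (helper name hypothetical):

package client // illustrative sketch, assumed to live alongside the generated client

import "github.com/rancher/norman/types"

// allProjectLoggings follows the pagination link until Next returns nil,
// accumulating every page's Data slice.
func allProjectLoggings(ops ProjectLoggingOperations) ([]ProjectLogging, error) {
	var all []ProjectLogging
	page, err := ops.List(&types.ListOpts{})
	for err == nil && page != nil {
		all = append(all, page.Data...)
		page, err = page.Next()
	}
	return all, err
}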

View File

@ -0,0 +1,24 @@
package client
const (
ProjectLoggingSpecType = "projectLoggingSpec"
ProjectLoggingSpecFieldDisplayName = "displayName"
ProjectLoggingSpecFieldElasticsearchConfig = "elasticsearchConfig"
ProjectLoggingSpecFieldKafkaConfig = "kafkaConfig"
ProjectLoggingSpecFieldOutputFlushInterval = "outputFlushInterval"
ProjectLoggingSpecFieldOutputTags = "outputTags"
ProjectLoggingSpecFieldProjectId = "projectId"
ProjectLoggingSpecFieldSplunkConfig = "splunkConfig"
ProjectLoggingSpecFieldSyslogConfig = "syslogConfig"
)
type ProjectLoggingSpec struct {
DisplayName string `json:"displayName,omitempty"`
ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
OutputTags map[string]string `json:"outputTags,omitempty"`
ProjectId string `json:"projectId,omitempty"`
SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
}

View File

@ -0,0 +1,18 @@
package client
const (
SplunkConfigType = "splunkConfig"
SplunkConfigFieldHost = "host"
SplunkConfigFieldPort = "port"
SplunkConfigFieldProtocol = "protocol"
SplunkConfigFieldSource = "source"
SplunkConfigFieldToken = "token"
)
type SplunkConfig struct {
Host string `json:"host,omitempty"`
Port *int64 `json:"port,omitempty"`
Protocol string `json:"protocol,omitempty"`
Source string `json:"source,omitempty"`
Token string `json:"token,omitempty"`
}

View File

@ -0,0 +1,16 @@
package client
const (
SyslogConfigType = "syslogConfig"
SyslogConfigFieldHost = "host"
SyslogConfigFieldPort = "port"
SyslogConfigFieldProgram = "program"
SyslogConfigFieldSeverity = "severity"
)
type SyslogConfig struct {
Host string `json:"host,omitempty"`
Port *int64 `json:"port,omitempty"`
Program string `json:"program,omitempty"`
Severity string `json:"severity,omitempty"`
}

View File

@ -0,0 +1,12 @@
package client
const (
ZookeeperType = "zookeeper"
ZookeeperFieldHost = "host"
ZookeeperFieldPort = "port"
)
type Zookeeper struct {
Host string `json:"host,omitempty"`
Port *int64 `json:"port,omitempty"`
}