
Update generated code

Darren Shepherd
2018-01-26 14:10:54 -07:00
parent 84558def93
commit 36927d4c64
21 changed files with 1534 additions and 0 deletions


@@ -0,0 +1,10 @@
package client

const (
    BrokerListType = "brokerList"
    BrokerListFieldBrokerList = "brokerList"
)

type BrokerList struct {
    BrokerList []string `json:"brokerList,omitempty"`
}


@@ -32,6 +32,8 @@ type Client struct {
DynamicSchema DynamicSchemaOperations
Stack StackOperations
Preference PreferenceOperations
ClusterLogging ClusterLoggingOperations
ProjectLogging ProjectLoggingOperations
ListenConfig ListenConfigOperations
Setting SettingOperations
}
@@ -71,6 +73,8 @@ func NewClient(opts *clientbase.ClientOpts) (*Client, error) {
client.DynamicSchema = newDynamicSchemaClient(client)
client.Stack = newStackClient(client)
client.Preference = newPreferenceClient(client)
client.ClusterLogging = newClusterLoggingClient(client)
client.ProjectLogging = newProjectLoggingClient(client)
client.ListenConfig = newListenConfigClient(client)
client.Setting = newSettingClient(client)
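
For orientation, here is a minimal usage sketch, not part of this commit, showing that the two newly wired operation sets become reachable from a single Client once NewClient runs; the import path, URL, and credentials below are assumptions/placeholders.

package example

import (
    "github.com/rancher/norman/clientbase"
    managementClient "github.com/rancher/types/client/management/v3" // assumed import path for this generated package
)

func newManagementClient() (*managementClient.Client, error) {
    // Placeholder endpoint and credentials; substitute real values.
    opts := &clientbase.ClientOpts{
        URL:       "https://rancher.example.com/v3",
        AccessKey: "token-xxxxx",
        SecretKey: "secret",
    }
    c, err := managementClient.NewClient(opts)
    if err != nil {
        return nil, err
    }
    // The two operation sets added by this commit are now populated.
    _ = c.ClusterLogging // ClusterLoggingOperations
    _ = c.ProjectLogging // ProjectLoggingOperations
    return c, nil
}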


@@ -0,0 +1,119 @@
package client

import (
    "github.com/rancher/norman/types"
)

const (
    ClusterLoggingType = "clusterLogging"
    ClusterLoggingFieldAnnotations = "annotations"
    ClusterLoggingFieldClusterId = "clusterId"
    ClusterLoggingFieldCreated = "created"
    ClusterLoggingFieldCreatorID = "creatorId"
    ClusterLoggingFieldDisplayName = "displayName"
    ClusterLoggingFieldElasticsearchConfig = "elasticsearchConfig"
    ClusterLoggingFieldEmbeddedConfig = "embeddedConfig"
    ClusterLoggingFieldKafkaConfig = "kafkaConfig"
    ClusterLoggingFieldLabels = "labels"
    ClusterLoggingFieldName = "name"
    ClusterLoggingFieldNamespaceId = "namespaceId"
    ClusterLoggingFieldOutputFlushInterval = "outputFlushInterval"
    ClusterLoggingFieldOutputTags = "outputTags"
    ClusterLoggingFieldOwnerReferences = "ownerReferences"
    ClusterLoggingFieldRemoved = "removed"
    ClusterLoggingFieldSplunkConfig = "splunkConfig"
    ClusterLoggingFieldState = "state"
    ClusterLoggingFieldStatus = "status"
    ClusterLoggingFieldSyslogConfig = "syslogConfig"
    ClusterLoggingFieldTransitioning = "transitioning"
    ClusterLoggingFieldTransitioningMessage = "transitioningMessage"
    ClusterLoggingFieldUuid = "uuid"
)

type ClusterLogging struct {
    types.Resource
    Annotations map[string]string `json:"annotations,omitempty"`
    ClusterId string `json:"clusterId,omitempty"`
    Created string `json:"created,omitempty"`
    CreatorID string `json:"creatorId,omitempty"`
    DisplayName string `json:"displayName,omitempty"`
    ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
    EmbeddedConfig *EmbeddedConfig `json:"embeddedConfig,omitempty"`
    KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
    Labels map[string]string `json:"labels,omitempty"`
    Name string `json:"name,omitempty"`
    NamespaceId string `json:"namespaceId,omitempty"`
    OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
    OutputTags map[string]string `json:"outputTags,omitempty"`
    OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"`
    Removed string `json:"removed,omitempty"`
    SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
    State string `json:"state,omitempty"`
    Status *LoggingStatus `json:"status,omitempty"`
    SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
    Transitioning string `json:"transitioning,omitempty"`
    TransitioningMessage string `json:"transitioningMessage,omitempty"`
    Uuid string `json:"uuid,omitempty"`
}

type ClusterLoggingCollection struct {
    types.Collection
    Data []ClusterLogging `json:"data,omitempty"`
    client *ClusterLoggingClient
}

type ClusterLoggingClient struct {
    apiClient *Client
}

type ClusterLoggingOperations interface {
    List(opts *types.ListOpts) (*ClusterLoggingCollection, error)
    Create(opts *ClusterLogging) (*ClusterLogging, error)
    Update(existing *ClusterLogging, updates interface{}) (*ClusterLogging, error)
    ByID(id string) (*ClusterLogging, error)
    Delete(container *ClusterLogging) error
}

func newClusterLoggingClient(apiClient *Client) *ClusterLoggingClient {
    return &ClusterLoggingClient{
        apiClient: apiClient,
    }
}

func (c *ClusterLoggingClient) Create(container *ClusterLogging) (*ClusterLogging, error) {
    resp := &ClusterLogging{}
    err := c.apiClient.Ops.DoCreate(ClusterLoggingType, container, resp)
    return resp, err
}

func (c *ClusterLoggingClient) Update(existing *ClusterLogging, updates interface{}) (*ClusterLogging, error) {
    resp := &ClusterLogging{}
    err := c.apiClient.Ops.DoUpdate(ClusterLoggingType, &existing.Resource, updates, resp)
    return resp, err
}

func (c *ClusterLoggingClient) List(opts *types.ListOpts) (*ClusterLoggingCollection, error) {
    resp := &ClusterLoggingCollection{}
    err := c.apiClient.Ops.DoList(ClusterLoggingType, opts, resp)
    resp.client = c
    return resp, err
}

func (cc *ClusterLoggingCollection) Next() (*ClusterLoggingCollection, error) {
    if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
        resp := &ClusterLoggingCollection{}
        err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
        resp.client = cc.client
        return resp, err
    }
    return nil, nil
}

func (c *ClusterLoggingClient) ByID(id string) (*ClusterLogging, error) {
    resp := &ClusterLogging{}
    err := c.apiClient.Ops.DoByID(ClusterLoggingType, id, resp)
    return resp, err
}

func (c *ClusterLoggingClient) Delete(container *ClusterLogging) error {
    return c.apiClient.Ops.DoResourceDelete(ClusterLoggingType, &container.Resource)
}


@@ -0,0 +1,26 @@
package client

const (
    ClusterLoggingSpecType = "clusterLoggingSpec"
    ClusterLoggingSpecFieldClusterId = "clusterId"
    ClusterLoggingSpecFieldDisplayName = "displayName"
    ClusterLoggingSpecFieldElasticsearchConfig = "elasticsearchConfig"
    ClusterLoggingSpecFieldEmbeddedConfig = "embeddedConfig"
    ClusterLoggingSpecFieldKafkaConfig = "kafkaConfig"
    ClusterLoggingSpecFieldOutputFlushInterval = "outputFlushInterval"
    ClusterLoggingSpecFieldOutputTags = "outputTags"
    ClusterLoggingSpecFieldSplunkConfig = "splunkConfig"
    ClusterLoggingSpecFieldSyslogConfig = "syslogConfig"
)

type ClusterLoggingSpec struct {
    ClusterId string `json:"clusterId,omitempty"`
    DisplayName string `json:"displayName,omitempty"`
    ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
    EmbeddedConfig *EmbeddedConfig `json:"embeddedConfig,omitempty"`
    KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
    OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
    OutputTags map[string]string `json:"outputTags,omitempty"`
    SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
    SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
}


@@ -0,0 +1,20 @@
package client

const (
    ElasticsearchConfigType = "elasticsearchConfig"
    ElasticsearchConfigFieldAuthPassword = "authPassword"
    ElasticsearchConfigFieldAuthUserName = "authUsername"
    ElasticsearchConfigFieldDateFormat = "dateFormat"
    ElasticsearchConfigFieldHost = "host"
    ElasticsearchConfigFieldIndexPrefix = "indexPrefix"
    ElasticsearchConfigFieldPort = "port"
)

type ElasticsearchConfig struct {
    AuthPassword string `json:"authPassword,omitempty"`
    AuthUserName string `json:"authUsername,omitempty"`
    DateFormat string `json:"dateFormat,omitempty"`
    Host string `json:"host,omitempty"`
    IndexPrefix string `json:"indexPrefix,omitempty"`
    Port *int64 `json:"port,omitempty"`
}
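
To show how this config type plugs into the resources above, here is a hedged sketch of creating a cluster-level logging target backed by Elasticsearch; the cluster ID, host, index prefix, date format, and the surrounding function are placeholders/assumptions.

package example

import (
    managementClient "github.com/rancher/types/client/management/v3" // assumed import path
)

func createElasticsearchClusterLogging(c *managementClient.Client, clusterID string) (*managementClient.ClusterLogging, error) {
    port := int64(9200)
    return c.ClusterLogging.Create(&managementClient.ClusterLogging{
        Name:      "es-logging", // placeholder name
        ClusterId: clusterID,
        ElasticsearchConfig: &managementClient.ElasticsearchConfig{
            Host:        "es.example.com", // placeholder host
            Port:        &port,
            IndexPrefix: "rancher",        // placeholder prefix
            DateFormat:  "YYYY-MM-DD",     // placeholder format
        },
    })
}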


@@ -0,0 +1,12 @@
package client

const (
    EmbeddedConfigType = "embeddedConfig"
    EmbeddedConfigFieldDateFormat = "dateFormat"
    EmbeddedConfigFieldIndexPrefix = "indexPrefix"
)

type EmbeddedConfig struct {
    DateFormat string `json:"dateFormat,omitempty"`
    IndexPrefix string `json:"indexPrefix,omitempty"`
}


@@ -0,0 +1,18 @@
package client

const (
    KafkaConfigType = "kafkaConfig"
    KafkaConfigFieldBroker = "broker"
    KafkaConfigFieldDataType = "dataType"
    KafkaConfigFieldMaxSendRetries = "maxSendRetries"
    KafkaConfigFieldTopic = "topic"
    KafkaConfigFieldZookeeper = "zookeeper"
)

type KafkaConfig struct {
    Broker *BrokerList `json:"broker,omitempty"`
    DataType string `json:"dataType,omitempty"`
    MaxSendRetries *int64 `json:"maxSendRetries,omitempty"`
    Topic string `json:"topic,omitempty"`
    Zookeeper *Zookeeper `json:"zookeeper,omitempty"`
}


@@ -0,0 +1,20 @@
package client

const (
    LoggingConditionType = "loggingCondition"
    LoggingConditionFieldLastTransitionTime = "lastTransitionTime"
    LoggingConditionFieldLastUpdateTime = "lastUpdateTime"
    LoggingConditionFieldMessage = "message"
    LoggingConditionFieldReason = "reason"
    LoggingConditionFieldStatus = "status"
    LoggingConditionFieldType = "type"
)

type LoggingCondition struct {
    LastTransitionTime string `json:"lastTransitionTime,omitempty"`
    LastUpdateTime string `json:"lastUpdateTime,omitempty"`
    Message string `json:"message,omitempty"`
    Reason string `json:"reason,omitempty"`
    Status string `json:"status,omitempty"`
    Type string `json:"type,omitempty"`
}


@@ -0,0 +1,10 @@
package client

const (
    LoggingStatusType = "loggingStatus"
    LoggingStatusFieldConditions = "conditions"
)

type LoggingStatus struct {
    Conditions []LoggingCondition `json:"conditions,omitempty"`
}


@@ -0,0 +1,117 @@
package client

import (
    "github.com/rancher/norman/types"
)

const (
    ProjectLoggingType = "projectLogging"
    ProjectLoggingFieldAnnotations = "annotations"
    ProjectLoggingFieldCreated = "created"
    ProjectLoggingFieldCreatorID = "creatorId"
    ProjectLoggingFieldDisplayName = "displayName"
    ProjectLoggingFieldElasticsearchConfig = "elasticsearchConfig"
    ProjectLoggingFieldKafkaConfig = "kafkaConfig"
    ProjectLoggingFieldLabels = "labels"
    ProjectLoggingFieldName = "name"
    ProjectLoggingFieldNamespaceId = "namespaceId"
    ProjectLoggingFieldOutputFlushInterval = "outputFlushInterval"
    ProjectLoggingFieldOutputTags = "outputTags"
    ProjectLoggingFieldOwnerReferences = "ownerReferences"
    ProjectLoggingFieldProjectId = "projectId"
    ProjectLoggingFieldRemoved = "removed"
    ProjectLoggingFieldSplunkConfig = "splunkConfig"
    ProjectLoggingFieldState = "state"
    ProjectLoggingFieldStatus = "status"
    ProjectLoggingFieldSyslogConfig = "syslogConfig"
    ProjectLoggingFieldTransitioning = "transitioning"
    ProjectLoggingFieldTransitioningMessage = "transitioningMessage"
    ProjectLoggingFieldUuid = "uuid"
)

type ProjectLogging struct {
    types.Resource
    Annotations map[string]string `json:"annotations,omitempty"`
    Created string `json:"created,omitempty"`
    CreatorID string `json:"creatorId,omitempty"`
    DisplayName string `json:"displayName,omitempty"`
    ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
    KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
    Labels map[string]string `json:"labels,omitempty"`
    Name string `json:"name,omitempty"`
    NamespaceId string `json:"namespaceId,omitempty"`
    OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
    OutputTags map[string]string `json:"outputTags,omitempty"`
    OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"`
    ProjectId string `json:"projectId,omitempty"`
    Removed string `json:"removed,omitempty"`
    SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
    State string `json:"state,omitempty"`
    Status *LoggingStatus `json:"status,omitempty"`
    SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
    Transitioning string `json:"transitioning,omitempty"`
    TransitioningMessage string `json:"transitioningMessage,omitempty"`
    Uuid string `json:"uuid,omitempty"`
}

type ProjectLoggingCollection struct {
    types.Collection
    Data []ProjectLogging `json:"data,omitempty"`
    client *ProjectLoggingClient
}

type ProjectLoggingClient struct {
    apiClient *Client
}

type ProjectLoggingOperations interface {
    List(opts *types.ListOpts) (*ProjectLoggingCollection, error)
    Create(opts *ProjectLogging) (*ProjectLogging, error)
    Update(existing *ProjectLogging, updates interface{}) (*ProjectLogging, error)
    ByID(id string) (*ProjectLogging, error)
    Delete(container *ProjectLogging) error
}

func newProjectLoggingClient(apiClient *Client) *ProjectLoggingClient {
    return &ProjectLoggingClient{
        apiClient: apiClient,
    }
}

func (c *ProjectLoggingClient) Create(container *ProjectLogging) (*ProjectLogging, error) {
    resp := &ProjectLogging{}
    err := c.apiClient.Ops.DoCreate(ProjectLoggingType, container, resp)
    return resp, err
}

func (c *ProjectLoggingClient) Update(existing *ProjectLogging, updates interface{}) (*ProjectLogging, error) {
    resp := &ProjectLogging{}
    err := c.apiClient.Ops.DoUpdate(ProjectLoggingType, &existing.Resource, updates, resp)
    return resp, err
}

func (c *ProjectLoggingClient) List(opts *types.ListOpts) (*ProjectLoggingCollection, error) {
    resp := &ProjectLoggingCollection{}
    err := c.apiClient.Ops.DoList(ProjectLoggingType, opts, resp)
    resp.client = c
    return resp, err
}

func (cc *ProjectLoggingCollection) Next() (*ProjectLoggingCollection, error) {
    if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
        resp := &ProjectLoggingCollection{}
        err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
        resp.client = cc.client
        return resp, err
    }
    return nil, nil
}

func (c *ProjectLoggingClient) ByID(id string) (*ProjectLogging, error) {
    resp := &ProjectLogging{}
    err := c.apiClient.Ops.DoByID(ProjectLoggingType, id, resp)
    return resp, err
}

func (c *ProjectLoggingClient) Delete(container *ProjectLogging) error {
    return c.apiClient.Ops.DoResourceDelete(ProjectLoggingType, &container.Resource)
}
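
And a parallel sketch for the project-level resource, creating a Kafka-backed logging target with the BrokerList and KafkaConfig types from this commit; the broker address, topic, retry count, and surrounding function are placeholders/assumptions.

package example

import (
    managementClient "github.com/rancher/types/client/management/v3" // assumed import path
)

func createKafkaProjectLogging(c *managementClient.Client, projectID string) (*managementClient.ProjectLogging, error) {
    maxRetries := int64(3)
    return c.ProjectLogging.Create(&managementClient.ProjectLogging{
        Name:      "kafka-logging", // placeholder name
        ProjectId: projectID,
        KafkaConfig: &managementClient.KafkaConfig{
            Broker: &managementClient.BrokerList{
                BrokerList: []string{"kafka-0.example.com:9092"}, // placeholder broker
            },
            Topic:          "rancher-logs", // placeholder topic
            MaxSendRetries: &maxRetries,
        },
    })
}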


@@ -0,0 +1,24 @@
package client

const (
    ProjectLoggingSpecType = "projectLoggingSpec"
    ProjectLoggingSpecFieldDisplayName = "displayName"
    ProjectLoggingSpecFieldElasticsearchConfig = "elasticsearchConfig"
    ProjectLoggingSpecFieldKafkaConfig = "kafkaConfig"
    ProjectLoggingSpecFieldOutputFlushInterval = "outputFlushInterval"
    ProjectLoggingSpecFieldOutputTags = "outputTags"
    ProjectLoggingSpecFieldProjectId = "projectId"
    ProjectLoggingSpecFieldSplunkConfig = "splunkConfig"
    ProjectLoggingSpecFieldSyslogConfig = "syslogConfig"
)

type ProjectLoggingSpec struct {
    DisplayName string `json:"displayName,omitempty"`
    ElasticsearchConfig *ElasticsearchConfig `json:"elasticsearchConfig,omitempty"`
    KafkaConfig *KafkaConfig `json:"kafkaConfig,omitempty"`
    OutputFlushInterval *int64 `json:"outputFlushInterval,omitempty"`
    OutputTags map[string]string `json:"outputTags,omitempty"`
    ProjectId string `json:"projectId,omitempty"`
    SplunkConfig *SplunkConfig `json:"splunkConfig,omitempty"`
    SyslogConfig *SyslogConfig `json:"syslogConfig,omitempty"`
}


@@ -0,0 +1,18 @@
package client

const (
    SplunkConfigType = "splunkConfig"
    SplunkConfigFieldHost = "host"
    SplunkConfigFieldPort = "port"
    SplunkConfigFieldProtocol = "protocol"
    SplunkConfigFieldSource = "source"
    SplunkConfigFieldToken = "token"
)

type SplunkConfig struct {
    Host string `json:"host,omitempty"`
    Port *int64 `json:"port,omitempty"`
    Protocol string `json:"protocol,omitempty"`
    Source string `json:"source,omitempty"`
    Token string `json:"token,omitempty"`
}


@@ -0,0 +1,16 @@
package client

const (
    SyslogConfigType = "syslogConfig"
    SyslogConfigFieldHost = "host"
    SyslogConfigFieldPort = "port"
    SyslogConfigFieldProgram = "program"
    SyslogConfigFieldSeverity = "severity"
)

type SyslogConfig struct {
    Host string `json:"host,omitempty"`
    Port *int64 `json:"port,omitempty"`
    Program string `json:"program,omitempty"`
    Severity string `json:"severity,omitempty"`
}


@@ -0,0 +1,12 @@
package client

const (
    ZookeeperType = "zookeeper"
    ZookeeperFieldHost = "host"
    ZookeeperFieldPort = "port"
)

type Zookeeper struct {
    Host string `json:"host,omitempty"`
    Port *int64 `json:"port,omitempty"`
}