1
0
mirror of https://github.com/rancher/types.git synced 2025-07-18 23:36:24 +00:00

Update schemas

This commit is contained in:
Darren Shepherd 2017-11-11 21:14:05 -07:00
parent 88242c48c9
commit 9d4da2e2e1
12 changed files with 866 additions and 72 deletions

43
generator/generator.go Normal file
View File

@ -0,0 +1,43 @@
package generator
import (
"path"
"strings"
"github.com/rancher/norman/generator"
"github.com/rancher/norman/types"
)
var (
	// basePackage is the root Go import path of this repository; all
	// generated packages are placed beneath it.
	basePackage = "github.com/rancher/types"
	// baseCattle is the sub-package that receives the generated cattle
	// client code.
	baseCattle = "client"
)
// Generate runs norman code generation for the given schema set. The
// cattle client package is derived from the last component of the API
// group (e.g. "cluster" from "io.cattle.cluster") plus the version, and
// the k8s package from the full group/version. Panics on any error.
func Generate(schemas *types.Schemas) {
	version := getVersion(schemas)
	groupParts := strings.Split(version.Group, ".")
	shortGroup := groupParts[len(groupParts)-1]

	cattlePkg := path.Join(basePackage, baseCattle, shortGroup, version.Version)
	k8sPkg := path.Join(basePackage, version.Group, version.Version)

	err := generator.Generate(schemas, cattlePkg, k8sPkg)
	if err != nil {
		panic(err)
	}
}
// getVersion returns the single APIVersion shared by every schema in
// the set, panicking if two different group/version pairs are found.
func getVersion(schemas *types.Schemas) *types.APIVersion {
	var found types.APIVersion
	for _, s := range schemas.Schemas() {
		switch {
		case found.Group == "":
			// First schema seen establishes the expected version.
			found = s.Version
		case found.Group != s.Version.Group || found.Version != s.Version.Version:
			panic("schema set contains two APIVersions")
		}
	}
	return &found
}

View File

@ -0,0 +1,20 @@
package schema
import (
"github.com/rancher/norman/types"
"github.com/rancher/types/io.cattle.authorization/v1"
)
var (
	// Version identifies the io.cattle.authorization API group, served
	// under the /v1-authz HTTP path.
	Version = types.APIVersion{
		Version: "v1",
		Group:   "io.cattle.authorization",
		Path:    "/v1-authz",
	}

	// Schemas registers the authorization resource types exposed by
	// this API version.
	Schemas = types.NewSchemas().
		MustImport(&Version, v1.Project{}).
		MustImport(&Version, v1.RoleTemplate{}).
		MustImport(&Version, v1.PodSecurityPolicyTemplate{}).
		MustImport(&Version, v1.ProjectRoleBinding{})
)

View File

@ -1,3 +0,0 @@
// +k8s:deepcopy-gen=package,register
package v1

View File

@ -3,11 +3,8 @@ package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var SchemeBuilder = runtime.NewSchemeBuilder()
type ClusterConditionType string
const (
@ -26,8 +23,6 @@ const (
// More conditions can be added if underlying controllers request it
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Cluster struct {
metav1.TypeMeta `json:",inline"`
// Standard objects metadata. More info:
@ -41,14 +36,6 @@ type Cluster struct {
Status *ClusterStatus `json:"status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Items []Cluster
}
type ClusterSpec struct {
GKEConfig *GKEConfig `json:"gkeConfig,omitempty"`
AKSConfig *AKSConfig `json:"aksConfig,omitempty"`
@ -219,16 +206,6 @@ type baseService struct {
Image string `yaml:"image"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNode struct {
v1.Node
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNodeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Items []ClusterNode
}

View File

@ -0,0 +1,190 @@
package mapper
import (
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"k8s.io/api/core/v1"
)
// EnvironmentMapper translates between the Kubernetes env/envFrom
// container fields and the API's friendlier environment (name->value
// map) and environmentFrom (list of source descriptors) fields.
type EnvironmentMapper struct {
}
// FromInternal converts the Kubernetes env/envFrom container fields
// into the API form: plain name/value env vars become entries in the
// "environment" map, while valueFrom references and envFrom sources are
// flattened into "environmentFrom" descriptors tagged with their source
// kind (field, resource, configMap, secret). The raw env/envFrom keys
// are always removed; conversion is best-effort (decode errors are
// silently skipped).
func (e EnvironmentMapper) FromInternal(data map[string]interface{}) {
	env := []v1.EnvVar{}
	envFrom := []v1.EnvFromSource{}
	envMap := map[string]interface{}{}
	envFromMaps := []map[string]interface{}{}

	if err := convert.ToObj(data["env"], &env); err == nil {
		for _, envVar := range env {
			// Simple name/value pair: goes straight into the map.
			if envVar.ValueFrom == nil {
				envMap[envVar.Name] = envVar.Value
				continue
			}
			// Referenced values become environmentFrom entries keyed by
			// source kind; targetKey records the env var name they fill.
			if envVar.ValueFrom.FieldRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "field",
					"sourceName": envVar.ValueFrom.FieldRef.FieldPath,
					"targetKey":  envVar.Name,
				})
			}
			if envVar.ValueFrom.ResourceFieldRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "resource",
					"sourceName": envVar.ValueFrom.ResourceFieldRef.ContainerName,
					"sourceKey":  envVar.ValueFrom.ResourceFieldRef.Resource,
					"divisor":    envVar.ValueFrom.ResourceFieldRef.Divisor,
					"targetKey":  envVar.Name,
				})
			}
			if envVar.ValueFrom.ConfigMapKeyRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "configMap",
					"sourceName": envVar.ValueFrom.ConfigMapKeyRef.Name,
					"sourceKey":  envVar.ValueFrom.ConfigMapKeyRef.Key,
					"optional":   envVar.ValueFrom.ConfigMapKeyRef.Optional,
					"targetKey":  envVar.Name,
				})
			}
			if envVar.ValueFrom.SecretKeyRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "secret",
					"sourceName": envVar.ValueFrom.SecretKeyRef.Name,
					"sourceKey":  envVar.ValueFrom.SecretKeyRef.Key,
					"optional":   envVar.ValueFrom.SecretKeyRef.Optional,
					"targetKey":  envVar.Name,
				})
			}
		}
	}

	// Bulk envFrom sources have no targetKey; they import whole
	// secrets/config maps with an optional prefix.
	if err := convert.ToObj(data["envFrom"], &envFrom); err == nil {
		for _, envVar := range envFrom {
			if envVar.SecretRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "secret",
					"sourceName": envVar.SecretRef.Name,
					"prefix":     envVar.Prefix,
					"optional":   envVar.SecretRef.Optional,
				})
			}
			if envVar.ConfigMapRef != nil {
				envFromMaps = append(envFromMaps, map[string]interface{}{
					"source":     "configMap",
					"sourceName": envVar.ConfigMapRef.Name,
					"prefix":     envVar.Prefix,
					"optional":   envVar.ConfigMapRef.Optional,
				})
			}
		}
	}

	delete(data, "env")
	delete(data, "envFrom")

	// Only set the friendly keys when there is something to show.
	if len(envMap) > 0 {
		data["environment"] = envMap
	}
	if len(envFromMaps) > 0 {
		data["environmentFrom"] = envFromMaps
	}
}
// ToInternal converts the friendly environment/environmentFrom fields
// back into the Kubernetes env/envFrom container fields.
//
// environmentFrom entries carrying a targetKey become individual env
// vars with a valueFrom reference; entries without a targetKey become
// bulk envFrom sources (secretRef/configMapRef). Entries with an empty
// source are ignored.
func (e EnvironmentMapper) ToInternal(data map[string]interface{}) {
	envVar := []map[string]interface{}{}
	envVarFrom := []map[string]interface{}{}

	for key, value := range convert.ToMapInterface(data["environment"]) {
		envVar = append(envVar, map[string]interface{}{
			"name":  key,
			"value": value,
		})
	}

	for _, value := range convert.ToMapSlice(data["environmentFrom"]) {
		source := convert.ToString(value["source"])
		if source == "" {
			continue
		}
		targetKey := convert.ToString(value["targetKey"])
		if targetKey == "" {
			// No target key: a whole secret/config map is imported.
			switch source {
			case "secret":
				envVarFrom = append(envVarFrom, map[string]interface{}{
					"prefix": value["prefix"],
					"secretRef": map[string]interface{}{
						"name":     value["sourceName"],
						"optional": value["optional"],
					},
				})
			case "configMap":
				envVarFrom = append(envVarFrom, map[string]interface{}{
					"prefix": value["prefix"],
					"configMapRef": map[string]interface{}{
						"name":     value["sourceName"],
						"optional": value["optional"],
					},
				})
			}
		} else {
			// Targeted entries belong in env, not envFrom.
			// BUG FIX: these cases previously did
			// `envVar = append(envVarFrom, ...)`, which discarded all
			// env vars collected so far and mixed the envFrom entries
			// into env.
			switch source {
			case "field":
				envVar = append(envVar, map[string]interface{}{
					"name": targetKey,
					"valueFrom": map[string]interface{}{
						"fieldRef": map[string]interface{}{
							"fieldPath": value["sourceName"],
						},
					},
				})
			case "resource":
				envVar = append(envVar, map[string]interface{}{
					"name": targetKey,
					"valueFrom": map[string]interface{}{
						"resourceFieldRef": map[string]interface{}{
							"containerName": value["sourceName"],
							"resource":      value["sourceKey"],
							"divisor":       value["divisor"],
						},
					},
				})
			case "configMap":
				envVar = append(envVar, map[string]interface{}{
					"name": targetKey,
					"valueFrom": map[string]interface{}{
						"configMapKeyRef": map[string]interface{}{
							"name":     value["sourceName"],
							"key":      value["sourceKey"],
							"optional": value["optional"],
						},
					},
				})
			case "secret":
				envVar = append(envVar, map[string]interface{}{
					"name": targetKey,
					"valueFrom": map[string]interface{}{
						"secretKeyRef": map[string]interface{}{
							"name":     value["sourceName"],
							"key":      value["sourceKey"],
							"optional": value["optional"],
						},
					},
				})
			}
		}
	}

	delete(data, "environment")
	delete(data, "environmentFrom")
	data["env"] = envVar
	data["envFrom"] = envVarFrom
}
// ModifySchema hides the internal env/envFrom fields, which are
// replaced by the environment/environmentFrom representation.
func (e EnvironmentMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
	for _, field := range []string{"env", "envFrom"} {
		delete(schema.ResourceFields, field)
	}
	return nil
}

View File

@ -0,0 +1,45 @@
package mapper
import (
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
)
// InitContainerMapper merges initContainers into the containers list
// for the API view, marking each init container so the two lists can be
// reconstructed on the way back in.
type InitContainerMapper struct {
}
// FromInternal folds init containers into the containers list, tagging
// each with initContainer=true so ToInternal can split them out again.
func (e InitContainerMapper) FromInternal(data map[string]interface{}) {
	merged, _ := data["containers"].([]interface{})
	for _, c := range convert.ToMapSlice(data["initContainers"]) {
		if c == nil {
			continue
		}
		c["initContainer"] = true
		merged = append(merged, c)
	}
	data["containers"] = merged
}
// ToInternal splits the combined containers list back into containers
// and initContainers, keyed off the initContainer marker added by
// FromInternal. The marker itself is stripped before the data is handed
// back to Kubernetes.
func (e InitContainerMapper) ToInternal(data map[string]interface{}) {
	newContainers := []interface{}{}
	newInitContainers := []interface{}{}
	// BUG FIX: FromInternal stores the merged list under "containers";
	// this previously read "container" (singular), so every container
	// was silently dropped on the way back in.
	for _, container := range convert.ToMapSlice(data["containers"]) {
		if convert.ToBool(container["initContainer"]) {
			newInitContainers = append(newInitContainers, container)
		} else {
			newContainers = append(newContainers, container)
		}
		delete(container, "initContainer")
	}
	data["containers"] = newContainers
	data["initContainers"] = newInitContainers
}
// ModifySchema drops the initContainers field; init containers are
// represented inside the containers list instead.
func (e InitContainerMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
	delete(schema.ResourceFields, "initContainers")

	return nil
}

View File

@ -0,0 +1,44 @@
package mapper
import (
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
)
// namespaceMapping maps the internal boolean host-namespace fields to
// their friendly string-valued API names.
var namespaceMapping = map[string]string{
	"hostNetwork": "net",
	"hostIPC":     "ipc",
	"hostPID":     "pid",
}
// NamespaceMapper converts the boolean hostNetwork/hostIPC/hostPID pod
// fields to and from the string-valued net/ipc/pid API fields, where
// the value "host" means the host namespace is shared.
type NamespaceMapper struct {
}
// FromInternal collapses the boolean host-namespace fields into the
// friendly names, writing "host" only when the flag is set, and always
// removes the internal keys.
func (e NamespaceMapper) FromInternal(data map[string]interface{}) {
	for internal, friendly := range namespaceMapping {
		if convert.ToBool(data[internal]) {
			data[friendly] = "host"
		}
		delete(data, internal)
	}
}
// ToInternal restores the boolean host-namespace fields: true exactly
// when the friendly field holds "host". The friendly keys are removed.
func (e NamespaceMapper) ToInternal(data map[string]interface{}) {
	for internal, friendly := range namespaceMapping {
		data[internal] = convert.ToString(data[friendly]) == "host"
		delete(data, friendly)
	}
}
// ModifySchema hides the internal host-namespace fields (the keys of
// namespaceMapping) from the schema.
func (e NamespaceMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
	for internal := range namespaceMapping {
		delete(schema.ResourceFields, internal)
	}
	return nil
}

View File

@ -0,0 +1,43 @@
package mapper
import (
"strings"
"github.com/rancher/norman/types"
m "github.com/rancher/norman/types/mapping/mapper"
)
// ResourceRequirementsMapper pivots resource requirements between the
// Kubernetes form, grouped by kind ({limits: {cpu: x}, requests:
// {cpu: y}}), and the API form, grouped by resource ({cpu: {limit: x,
// request: y}}).
type ResourceRequirementsMapper struct {
}
// FromInternal pivots {limits: {cpu: x}} style entries into
// {cpu: {limit: x}} so limits and requests for one resource sit side by
// side. Non-map values in data are left untouched.
func (r ResourceRequirementsMapper) FromInternal(data map[string]interface{}) {
	// Snapshot the keys first: PutValue inserts new top-level keys into
	// data, and the Go spec leaves it undefined whether entries added
	// during a range over a map are visited. Iterating and inserting at
	// the same time could re-process (and corrupt) freshly written
	// values.
	keys := make([]string, 0, len(data))
	for key := range data {
		keys = append(keys, key)
	}
	for _, key := range keys {
		mapValue, ok := data[key].(map[string]interface{})
		if !ok {
			continue
		}
		for subKey, subValue := range mapValue {
			// "limits"/"requests" -> singular "limit"/"request".
			m.PutValue(data, subValue, subKey, strings.TrimSuffix(key, "s"))
		}
		delete(data, key)
	}
}
// ToInternal pivots {cpu: {limit: x}} style entries back into the
// Kubernetes {limits: {cpu: x}} form. Non-map values are left
// untouched.
func (r ResourceRequirementsMapper) ToInternal(data map[string]interface{}) {
	// Snapshot the keys first: PutValue inserts new top-level keys into
	// data, and visiting entries added during a range over a map is
	// undefined behavior per the Go spec.
	keys := make([]string, 0, len(data))
	for key := range data {
		keys = append(keys, key)
	}
	for _, key := range keys {
		mapValue, ok := data[key].(map[string]interface{})
		if !ok {
			continue
		}
		for subKey, subValue := range mapValue {
			// Singular "limit"/"request" -> plural "limits"/"requests".
			m.PutValue(data, subValue, subKey, key+"s")
		}
		delete(data, key)
	}
}
// ModifySchema leaves the schema unchanged; only the data layout
// differs between the two representations.
func (r ResourceRequirementsMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
	return nil
}

View File

@ -0,0 +1,261 @@
package mapper
import (
"fmt"
"sort"
"strings"
"regexp"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/norman/types/mapping/mapper"
"k8s.io/api/core/v1"
)
var (
	// exprRegexp splits a scheduling expression such as "key = value",
	// "key != value", "key in (a, b)", "key notin (a, b)", "key > v",
	// or "key < v" into key, operator, and value submatches.
	exprRegexp = regexp.MustCompile("^(.*)(=|!=|<|>| in | notin )(.*)$")
)
// SchedulingMapper converts Kubernetes nodeSelector/affinity data to
// and from the API's scheduling.node.{requireAll,requireAny,preferred}
// expression-string form.
type SchedulingMapper struct {
}
// FromInternal converts nodeSelector and node affinity into the
// friendly scheduling.node expression lists. The raw
// nodeSelector/affinity keys are always removed, even when conversion
// bails out early.
func (s SchedulingMapper) FromInternal(data map[string]interface{}) {
	defer func() {
		delete(data, "nodeSelector")
		delete(data, "affinity")
	}()

	// nodeSelector entries are exact-match requirements: a bare key for
	// existence, or "key = value" for equality.
	requireAll := []string{}
	for key, value := range convert.ToMapInterface(data["nodeSelector"]) {
		if value == "" {
			requireAll = append(requireAll, key)
		} else {
			requireAll = append(requireAll, fmt.Sprintf("%s = %s", key, value))
		}
	}

	if len(requireAll) > 0 {
		mapper.PutValue(data, requireAll, "scheduling", "node", "requireAll")
	}

	v, ok := data["affinity"]
	if !ok || v == nil {
		return
	}

	// Best-effort: an affinity value that cannot be decoded is dropped.
	affinity := &v1.Affinity{}
	if err := convert.ToObj(v, affinity); err != nil {
		return
	}

	if affinity.NodeAffinity != nil {
		s.nodeAffinity(data, affinity.NodeAffinity)
	}
}
// nodeAffinity renders a NodeAffinity into requireAll/requireAny/
// preferred expression lists under scheduling.node.
//
// Kubernetes ORs required node-selector terms, so a single term maps to
// requireAll (its expressions are ANDed) while multiple terms map to
// requireAny, each term joined with " && ". Preferred terms are sorted
// by descending weight before rendering.
func (s SchedulingMapper) nodeAffinity(data map[string]interface{}, nodeAffinity *v1.NodeAffinity) {
	requireAll := []string{}
	requireAny := []string{}
	preferred := []string{}

	if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		for _, term := range nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
			exprs := NodeSelectorTermToStrings(term)
			if len(exprs) == 0 {
				continue
			}
			if len(requireAny) > 0 {
				// Once any is set all new terms go to any
				requireAny = append(requireAny, strings.Join(exprs, " && "))
			} else if len(requireAll) > 0 {
				// If all is already set, we actually need to move everything to any
				requireAny = append(requireAny, strings.Join(requireAll, " && "))
				requireAny = append(requireAny, strings.Join(exprs, " && "))
				requireAll = []string{}
			} else {
				// The first term is considered all
				requireAll = exprs
			}
		}
	}

	if nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
		sortPreferred(nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)
		for _, term := range nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
			exprs := NodeSelectorTermToStrings(term.Preference)
			preferred = append(preferred, strings.Join(exprs, " && "))
		}
	}

	if len(requireAll) > 0 {
		mapper.PutValue(data, requireAll, "scheduling", "node", "requireAll")
	}
	if len(requireAny) > 0 {
		mapper.PutValue(data, requireAny, "scheduling", "node", "requireAny")
	}
	if len(preferred) > 0 {
		// BUG FIX: this previously stored requireAny under "preferred",
		// discarding the preferred expressions entirely.
		mapper.PutValue(data, preferred, "scheduling", "node", "preferred")
	}
}
// sortPreferred orders preferred scheduling terms from highest weight
// to lowest, so the strongest preference is rendered first.
func sortPreferred(terms []v1.PreferredSchedulingTerm) {
	byWeightDesc := func(i, j int) bool {
		return terms[i].Weight > terms[j].Weight
	}
	sort.Slice(terms, byWeightDesc)
}
// NodeSelectorTermToStrings renders a term's match expressions as
// human-readable strings: "key = v", "key != v", "key in (a, b)",
// "key notin (a, b)", "key" (exists), "!key" (does not exist),
// "key > v", and "key < v". In/NotIn with a single value collapse to
// the =/!= forms; expressions whose operator/value combination cannot
// be rendered (e.g. Gt with no value) are skipped.
func NodeSelectorTermToStrings(term v1.NodeSelectorTerm) []string {
	exprs := []string{}

	for _, expr := range term.MatchExpressions {
		nextExpr := ""

		switch expr.Operator {
		case v1.NodeSelectorOpIn:
			if len(expr.Values) > 1 {
				nextExpr = fmt.Sprintf("%s in (%s)", expr.Key, strings.Join(expr.Values, ", "))
			} else if len(expr.Values) == 1 {
				nextExpr = fmt.Sprintf("%s = %s", expr.Key, expr.Values[0])
			}
		case v1.NodeSelectorOpNotIn:
			if len(expr.Values) > 1 {
				nextExpr = fmt.Sprintf("%s notin (%s)", expr.Key, strings.Join(expr.Values, ", "))
			} else if len(expr.Values) == 1 {
				nextExpr = fmt.Sprintf("%s != %s", expr.Key, expr.Values[0])
			}
		case v1.NodeSelectorOpExists:
			nextExpr = expr.Key
		case v1.NodeSelectorOpDoesNotExist:
			nextExpr = "!" + expr.Key
		case v1.NodeSelectorOpGt:
			if len(expr.Values) == 1 {
				nextExpr = fmt.Sprintf("%s > %s", expr.Key, expr.Values[0])
			}
		case v1.NodeSelectorOpLt:
			if len(expr.Values) == 1 {
				nextExpr = fmt.Sprintf("%s < %s", expr.Key, expr.Values[0])
			}
		}

		if nextExpr != "" {
			exprs = append(exprs, nextExpr)
		}
	}

	return exprs
}
// StringsToNodeSelectorTerm parses expression strings (the inverse of
// NodeSelectorTermToStrings) back into NodeSelectorTerms. Each input
// string becomes one term; "&&"-separated sub-expressions within a
// string become that term's match expressions.
func StringsToNodeSelectorTerm(exprs []string) []v1.NodeSelectorTerm {
	result := []v1.NodeSelectorTerm{}

	for _, inter := range exprs {
		term := v1.NodeSelectorTerm{}

		// Splitting on "&&" (no surrounding spaces) is fine because
		// each fragment is trimmed before use below.
		for _, expr := range strings.Split(inter, "&&") {
			groups := exprRegexp.FindStringSubmatch(expr)
			selectorRequirement := v1.NodeSelectorRequirement{}

			if groups == nil {
				// No operator present: a bare key means Exists, a
				// leading "!" means DoesNotExist.
				// NOTE(review): the "!" check runs before trimming, so
				// " !key" would be read as Exists on "!key" — confirm
				// inputs are pre-trimmed.
				if strings.HasPrefix(expr, "!") {
					selectorRequirement.Key = strings.TrimSpace(expr[1:])
					selectorRequirement.Operator = v1.NodeSelectorOpDoesNotExist
				} else {
					selectorRequirement.Key = strings.TrimSpace(expr)
					selectorRequirement.Operator = v1.NodeSelectorOpExists
				}
			} else {
				selectorRequirement.Key = strings.TrimSpace(groups[1])
				selectorRequirement.Values = convert.ToValuesSlice(groups[3])
				op := strings.TrimSpace(groups[2])
				// "=" and "in" both map to In; "!=" and "notin" both
				// map to NotIn (mirrors the rendering side).
				switch op {
				case "=":
					selectorRequirement.Operator = v1.NodeSelectorOpIn
				case "!=":
					selectorRequirement.Operator = v1.NodeSelectorOpNotIn
				case "notin":
					selectorRequirement.Operator = v1.NodeSelectorOpNotIn
				case "in":
					selectorRequirement.Operator = v1.NodeSelectorOpIn
				case "<":
					selectorRequirement.Operator = v1.NodeSelectorOpLt
				case ">":
					selectorRequirement.Operator = v1.NodeSelectorOpGt
				}
			}

			term.MatchExpressions = append(term.MatchExpressions, selectorRequirement)
		}

		result = append(result, term)
	}

	return result
}
// ToInternal converts scheduling.node back into the Kubernetes
// nodeName/affinity representation. The friendly "scheduling" key is
// always removed.
func (s SchedulingMapper) ToInternal(data map[string]interface{}) {
	defer func() {
		delete(data, "scheduling")
	}()

	nodeName := convert.ToString(mapper.GetValueN(data, "scheduling", "node", "name"))
	if nodeName != "" {
		data["nodeName"] = nodeName
	}

	requireAll := convert.ToStringSlice(mapper.GetValueN(data, "scheduling", "node", "requireAll"))
	requireAny := convert.ToStringSlice(mapper.GetValueN(data, "scheduling", "node", "requireAny"))
	preferred := convert.ToStringSlice(mapper.GetValueN(data, "scheduling", "node", "preferred"))

	if len(requireAll) == 0 && len(requireAny) == 0 && len(preferred) == 0 {
		return
	}

	nodeAffinity := v1.NodeAffinity{}

	if len(requireAll) > 0 {
		// requireAll expressions are ANDed into one selector term.
		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
				AggregateTerms(StringsToNodeSelectorTerm(requireAll)),
			},
		}
	}

	if len(requireAny) > 0 {
		// NOTE(review): when requireAll is also present, this replaces
		// the aggregated requireAll term with the requireAny terms
		// instead of combining them — confirm that is intended.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
		}
		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = StringsToNodeSelectorTerm(requireAny)
	}

	if len(preferred) > 0 {
		// Preferred terms get descending weights starting at 100, so
		// earlier expressions are weighted more strongly.
		count := int32(100)
		for _, term := range StringsToNodeSelectorTerm(preferred) {
			nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
				nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, v1.PreferredSchedulingTerm{
					Weight:     count,
					Preference: term,
				})
			count--
		}
	}

	// EncodeToMap error is intentionally ignored; encoding a freshly
	// built Affinity struct is not expected to fail.
	affinity, _ := convert.EncodeToMap(&v1.Affinity{
		NodeAffinity: &nodeAffinity,
	})
	data["affinity"] = affinity
}
// AggregateTerms merges the match expressions of every input term into
// a single NodeSelectorTerm (i.e. ANDs them all together).
func AggregateTerms(terms []v1.NodeSelectorTerm) v1.NodeSelectorTerm {
	var merged v1.NodeSelectorTerm
	for i := range terms {
		merged.MatchExpressions = append(merged.MatchExpressions, terms[i].MatchExpressions...)
	}
	return merged
}
// ModifySchema hides the internal nodeSelector/affinity fields, which
// are replaced by the scheduling representation.
func (s SchedulingMapper) ModifySchema(schema *types.Schema, schemas *types.Schemas) error {
	for _, field := range []string{"nodeSelector", "affinity"} {
		delete(schema.ResourceFields, field)
	}
	return nil
}

View File

@ -0,0 +1,135 @@
package schema
import (
"github.com/rancher/norman/types"
m "github.com/rancher/norman/types/mapping/mapper"
"github.com/rancher/types/io.cattle.workload/v1/schema/mapper"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
	// Version identifies the io.cattle.workload API group, served under
	// /v1-app and scoped beneath "projects" sub-contexts.
	Version = types.APIVersion{
		Version: "v1",
		Group:   "io.cattle.workload",
		Path:    "/v1-app",
		SubContexts: map[string]bool{
			"projects": true,
		},
	}

	// Schemas wires the Kubernetes pod/container types into the API,
	// renaming and reshaping fields into the friendlier client form via
	// the mappers below.
	Schemas = types.NewSchemas().
		// Capabilities: rename add/drop to capAdd/capDrop.
		AddMapperForType(&Version, v1.Capabilities{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Move{From: "add", To: "capAdd"},
				m.Move{From: "drop", To: "capDrop"},
			},
		}).
		// Pod security context: drop SELinux options, rename the id
		// fields to uid/gids/fsgid.
		AddMapperForType(&Version, v1.PodSecurityContext{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Drop{"seLinuxOptions"},
				m.Move{From: "runAsUser", To: "uid"},
				m.Move{From: "supplementalGroups", To: "gids"},
				m.Move{From: "fsGroup", To: "fsgid"},
			},
		}).
		// Container security context: inline capabilities, drop SELinux
		// options, shorten field names.
		AddMapperForType(&Version, v1.SecurityContext{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				&m.Embed{Field: "capabilities"},
				m.Drop{"seLinuxOptions"},
				m.Move{From: "readOnlyRootFilesystem", To: "readOnly"},
				m.Move{From: "runAsUser", To: "uid"},
			},
		}).
		// Container: docker-style names (entrypoint/command), friendly
		// probe names, environment flattening, inlined security context
		// and lifecycle.
		AddMapperForType(&Version, v1.Container{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Move{"command", "entrypoint"},
				m.Move{"args", "command"},
				m.Move{"livenessProbe", "healthcheck"},
				m.Move{"readinessProbe", "readycheck"},
				m.Move{"imagePullPolicy", "pullPolicy"},
				mapper.EnvironmentMapper{},
				&m.Embed{Field: "securityContext"},
				&m.Embed{Field: "lifecycle"},
			},
		}).
		AddMapperForType(&Version, v1.ContainerPort{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Drop{"name"},
			},
		}).
		// Volume mount: expose mount propagation via docker-style
		// option strings.
		AddMapperForType(&Version, v1.VolumeMount{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Enum{
					Field: "mountPropagation",
					Values: map[string][]string{
						"HostToContainer": []string{"rslave"},
						"Bidirectional":   []string{"rshared", "shared"},
					},
				},
			},
		}).
		// Handlers and probes share the exec/tcpSocket/httpGet union
		// flattening defined by handlerMapper.
		AddMapperForType(&Version, v1.Handler{}, handlerMapper).
		AddMapperForType(&Version, v1.Probe{}, handlerMapper).
		// Pod spec: friendly restart/pullSecrets names, host-namespace
		// and init-container and scheduling reshaping, containers and
		// hostAliases keyed as maps.
		AddMapperForType(&Version, v1.PodSpec{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Move{From: "restartPolicy", To: "restart"},
				m.Move{From: "imagePullSecrets", To: "pullSecrets"},
				mapper.NamespaceMapper{},
				mapper.InitContainerMapper{},
				mapper.SchedulingMapper{},
				&m.Embed{Field: "securityContext"},
				&m.SliceToMap{
					Field: "containers",
					Key:   "name",
				},
				&m.SliceToMap{
					Field: "hostAliases",
					Key:   "ip",
				},
			},
		}).
		// Pod: hide status and inline metadata/spec at the top level.
		AddMapperForType(&Version, v1.Pod{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				&m.Drop{"status"},
				&m.Embed{Field: "metadata"},
				&m.Embed{Field: "spec"},
			},
		}).
		AddMapperForType(&Version, v1.ResourceRequirements{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				mapper.ResourceRequirementsMapper{},
			},
		}).
		// Object metadata: drop server-managed bookkeeping fields and
		// rename the rest to the client conventions.
		AddMapperForType(&Version, metav1.ObjectMeta{}, &types.TypeMapper{
			Mappers: []types.Mapper{
				m.Drop{"generateName"},
				m.Drop{"selfLink"},
				m.Move{From: "uid", To: "uuid"},
				m.Drop{"resourceVersion"},
				m.Drop{"generation"},
				m.Move{From: "creationTimestamp", To: "created"},
				m.Move{From: "deletionTimestamp", To: "removed"},
				//DeletionGracePeriodSecondsMapper{},
				m.Drop{"initializers"},
				m.Drop{"finalizers"},
				m.Drop{"clusterName"},
				m.Drop{"ownerReferences"},
			},
		}).
		// Imports: overlay structs add the friendly fields introduced
		// by the mappers above onto the generated schemas.
		MustImport(&Version, v1.Handler{}, handlerOverride{}).
		MustImport(&Version, v1.Probe{}, handlerOverride{}).
		MustImport(&Version, v1.Container{}, struct {
			Scheduling      *Scheduling
			Resources       *Resources
			Environment     map[string]string
			EnvironmentFrom []EnvironmentFrom
			InitContainer   bool
		}{}).
		MustImport(&Version, v1.PodSpec{}, struct {
			Net string
			PID string
			IPC string
		}{}).
		MustImport(&Version, v1.Pod{})
)

View File

@ -0,0 +1,79 @@
package schema
import (
"github.com/rancher/norman/types"
m "github.com/rancher/norman/types/mapping/mapper"
)
var (
var (
	// handlerMapper flattens the exec/tcpSocket/httpGet union of a
	// handler/probe into the parent object; which member was set is
	// inferred from the fields present ("command" -> exec, "tcp"+"port"
	// -> tcpSocket, "port" alone -> httpGet).
	handlerMapper = &types.TypeMapper{
		Mappers: []types.Mapper{
			&m.UnionEmbed{
				Fields: []m.UnionMapping{
					{
						FieldName:   "exec",
						CheckFields: []string{"command"},
					},
					{
						FieldName:   "tcpSocket",
						CheckFields: []string{"tcp", "port"},
					},
					{
						FieldName:   "httpGet",
						CheckFields: []string{"port"},
					},
				},
			},
		},
	}
)
// handlerOverride adds the TCP discriminator field used by
// handlerMapper to distinguish tcpSocket from httpGet handlers.
type handlerOverride struct {
	TCP bool
}
// EnvironmentFrom describes a referenced environment value, matching
// the map entries produced by mapper.EnvironmentMapper (source kind,
// source object name/key, optional prefix, and the target env var
// name).
type EnvironmentFrom struct {
	Source     string
	SourceName string
	SourceKey  string
	Prefix     string
	Optional   bool
	TargetKey  string
}
// Resources is the per-resource view of resource requirements produced
// by mapper.ResourceRequirementsMapper.
type Resources struct {
	CPU       *ResourceRequest
	Memory    *ResourceRequest
	NvidiaGPU *ResourceRequest
}
// ResourceRequest pairs the request and limit quantities for a single
// resource.
type ResourceRequest struct {
	Request string
	Limit   string
}
// Scheduling is the friendly scheduling view produced by
// mapper.SchedulingMapper.
type Scheduling struct {
	AntiAffinity      string
	Node              *NodeScheduling
	Tolerate          []string
	Scheduler         string
	Priority          *int64
	PriorityClassName string
}
// NodeScheduling holds node placement constraints: an explicit node
// name plus requireAll/requireAny/preferred expression lists as
// rendered by mapper.SchedulingMapper.
type NodeScheduling struct {
	Name       string
	RequireAll []string
	RequireAny []string
	Preferred  []string
}
// deployParams collects deployment tuning options; unexported and not
// referenced elsewhere in this view — presumably an overlay for a
// future MustImport (TODO confirm usage).
type deployParams struct {
	BatchSize  int64
	Scale      int64
	Global     bool
	Cron       string
	Job        bool
	Ordered    bool
	QuorumSize int64
}

52
main.go
View File

@ -3,54 +3,14 @@
package main
import (
"path"
"strings"
"github.com/rancher/norman/generator"
"github.com/rancher/norman/types"
"github.com/rancher/types/generator"
authzSchema "github.com/rancher/types/io.cattle.authorization/v1/schema"
clusterSchema "github.com/rancher/types/io.cattle.cluster/v1/schema"
)
var (
basePackage = "github.com/rancher/types"
baseCattle = "client"
workloadSchema "github.com/rancher/types/io.cattle.workload/v1/schema"
)
func main() {
generate(clusterSchema.Schemas)
}
func generate(schemas *types.Schemas) {
version := getVersion(schemas)
groupParts := strings.Split(version.Group, ".")
cattleOutputPackage := path.Join(basePackage, baseCattle, groupParts[len(groupParts)-1], version.Version)
k8sOutputPackage := path.Join(basePackage, version.Group, version.Version)
if err := generator.Generate(schemas, cattleOutputPackage, k8sOutputPackage); err != nil {
panic(err)
}
}
func getVersion(schemas *types.Schemas) *types.APIVersion {
var version types.APIVersion
for _, schema := range schemas.Schemas() {
if version.Group == "" {
version = schema.Version
continue
}
if version.Group != schema.Version.Group ||
version.Version != schema.Version.Version {
panic("schema set contains two APIVersions")
}
}
return &version
}
func must(err error) {
if err != nil {
panic(err)
}
generator.Generate(clusterSchema.Schemas)
generator.Generate(workloadSchema.Schemas)
generator.Generate(authzSchema.Schemas)
}