
Added extra_args type map[string]string to ingress-controller. Added rancher-minimal-ssl.yml and rancher-minimal-passthrough.yml to deploy rancher v2.0 using rke. Updated README.md

rawmind0
2018-04-13 18:06:09 +02:00
committed by Darren Shepherd
parent b3f457426c
commit 14c239c598
13 changed files with 305 additions and 35 deletions

View File

@@ -201,13 +201,15 @@ RKE will ask some questions around the cluster file like number of the hosts, ip
## Ingress Controller
RKE will deploy Nginx controller by default, user can disable this by specifying `none` to ingress `provider` option in the cluster configuration, user also can specify list of options for nginx config map listed in this [doc](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/configmap.md), for example:
RKE will deploy the Nginx ingress controller by default. Users can disable this by setting the ingress `provider` option to `none` in the cluster configuration. Users can also specify options for the Nginx config map, listed in this [doc](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/configmap.md), and command-line extra_args, listed in this [doc](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/cli-arguments.md), for example:
```
ingress:
  provider: nginx
  options:
    map-hash-bucket-size: "128"
    ssl-protocols: SSLv2
  extra_args:
    enable-ssl-passthrough: ""
```
By default, RKE will deploy the ingress controller on all schedulable nodes (controlplane and workers). To deploy the ingress controller only on certain nodes, the user has to specify a `node_selector` for the ingress and set the matching label on those nodes, for example:
```
@@ -335,11 +337,40 @@ nodes:
```
## Deploying Rancher 2.0 using rke
Using RKE's pluggable user addons, it's possible to deploy Rancher 2.0 server with a single command after updating the node settings in the [rancher-minimal.yml](https://github.com/rancher/rke/blob/master/rancher-minimal.yml) cluster configuration:
Using RKE's pluggable user addons, it's possible to deploy the Rancher 2.0 server in HA with a single command.
Depending on how you want to manage your SSL certificates, there are two deployment options:
- Use your own SSL certificates:
  - Use [rancher-minimal-ssl.yml](https://github.com/rancher/rke/blob/master/rancher-minimal-ssl.yml)
  - Update the `nodes` configuration.
  - Update `<FQDN>` in the `cattle-ingress-http` ingress definition. The FQDN should be a DNS A record pointing to the IPs of all nodes running the ingress controller (controlplane and workers by default).
  - Update the certificate, key, and CA certificate (`<BASE64_CRT>`, `<BASE64_KEY>` and `<BASE64_CA>`) in the `cattle-keys-server` secret. Content must be base64 encoded, `cat <FILE> | base64` (see the sketch after this list).
  - Update the SSL certificate and key (`<BASE64_CRT>` and `<BASE64_KEY>`) in the `cattle-keys-ingress` secret. Content must be base64 encoded, `cat <FILE> | base64`. If self-signed, the certificate and key must be signed by the same CA.
  - Run RKE:
```bash
rke up --config rancher-minimal-ssl.yml
```
- Use SSL passthrough:
  - Use [rancher-minimal-passthrough.yml](https://github.com/rancher/rke/blob/master/rancher-minimal-passthrough.yml)
  - Update the `nodes` configuration.
  - Update `<FQDN>` in the `cattle-ingress-http` ingress definition. The FQDN should be a DNS A record pointing to the IPs of all nodes running the ingress controller (controlplane and workers by default).
  - Run RKE:
```bash
rke up --config rancher-minimal-passthrough.yml
```
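Both `cattle-keys-*` secrets expect base64-encoded PEM content, which is exactly what `cat <FILE> | base64` prints. As a minimal sketch of that encoding step, a hypothetical standalone Go helper (not part of RKE; the file names are placeholders):
```go
package main

import (
    "encoding/base64"
    "fmt"
    "os"
)

// Prints the base64 value expected by the cattle-keys-* secrets,
// equivalent to `cat <FILE> | base64`. The CLI wraps long lines while
// this prints a single line; base64 decoders accept both.
func main() {
    raw, err := os.ReadFile(os.Args[1]) // e.g. cert.pem, key.pem or cacerts.pem
    if err != nil {
        panic(err)
    }
    fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```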
Once the RKE run finishes, Rancher is deployed in the `cattle-system` namespace. You can access your Rancher instance at `https://<FQDN>`.
By default, the Rancher deployment has just 1 replica; scale it to the desired number of replicas:
```bash
rke up --config rancher-minimal.yml
```
```bash
kubectl -n cattle-system scale deployment cattle --replicas=3
```
## Operating Systems Notes
### Atomic OS

View File

@@ -174,6 +174,8 @@ private_registries:
#   provider: nginx
#   node_selector:
#     app: ingress
#   extra_args:
#     enable-ssl-passthrough: ""
ingress:
  provider: nginx

View File

@@ -30,6 +30,7 @@ type ingressOptions struct {
    RBACConfig     string
    Options        map[string]string
    NodeSelector   map[string]string
    ExtraArgs      map[string]string
    AlpineImage    string
    IngressImage   string
    IngressBackend string
@@ -249,6 +250,7 @@ func (c *Cluster) deployIngress(ctx context.Context) error {
    RBACConfig:     c.Authorization.Mode,
    Options:        c.Ingress.Options,
    NodeSelector:   c.Ingress.NodeSelector,
    ExtraArgs:      c.Ingress.ExtraArgs,
    AlpineImage:    c.SystemImages.Alpine,
    IngressImage:   c.SystemImages.Ingress,
    IngressBackend: c.SystemImages.IngressBackend,

View File

@@ -1,9 +1,15 @@
# default k8s version: v1.8.9-rancher1-1
# default network plugin: flannel
nodes:
- address: 1.2.3.4
  user: ubuntu
- address: <IP>
  user: <USER>
  role: [controlplane,etcd,worker]
  ssh_key_path: <PEM_FILE>
ingress:
  provider: nginx
  extra_args:
    enable-ssl-passthrough: ""
addons: |-
  ---
@@ -50,13 +56,16 @@ addons: |-
  metadata:
    namespace: cattle-system
    name: cattle-ingress-http
    annotations:
      nginx.ingress.kubernetes.io/proxy-connect-timeout: "30"
      nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open
      nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open
      nginx.ingress.kubernetes.io/ssl-passthrough: "true" # Enable ssl-passthrough to backend.
  spec:
    rules:
    - http:
    - host: <FQDN> # FQDN to access cattle server
      http:
        paths:
        - backend:
            serviceName: cattle-service
            servicePort: 80
        - backend:
            serviceName: cattle-service
            servicePort: 443

rancher-minimal-ssl.yml (new file, 121 lines)
View File

@@ -0,0 +1,121 @@
# default k8s version: v1.8.9-rancher1-1
# default network plugin: flannel
nodes:
- address: <IP>
  user: <USER>
  role: [controlplane,etcd,worker]
  ssh_key_path: <PEM_FILE>
addons: |-
  ---
  kind: Namespace
  apiVersion: v1
  metadata:
    name: cattle-system
  ---
  kind: ClusterRoleBinding
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: cattle-crb
  subjects:
  - kind: User
    name: system:serviceaccount:cattle-system:default
    apiGroup: rbac.authorization.k8s.io
  roleRef:
    kind: ClusterRole
    name: cluster-admin
    apiGroup: rbac.authorization.k8s.io
  ---
  apiVersion: v1
  kind: Secret
  metadata:
    name: cattle-keys-ingress
    namespace: cattle-system
  type: Opaque
  data:
    tls.crt: <BASE64_CRT> # ssl cert for ingress. If selfsigned, must be signed by same CA as cattle server
    tls.key: <BASE64_KEY> # ssl key for ingress. If selfsigned, must be signed by same CA as cattle server
  ---
  apiVersion: v1
  kind: Secret
  metadata:
    name: cattle-keys-server
    namespace: cattle-system
  type: Opaque
  data:
    cert.pem: <BASE64_CRT> # ssl cert for cattle server.
    key.pem: <BASE64_KEY> # ssl key for cattle server.
    cacerts.pem: <BASE64_CA> # CA cert used to sign cattle server cert and key
  ---
  apiVersion: v1
  kind: Service
  metadata:
    namespace: cattle-system
    name: cattle-service
    labels:
      app: cattle
  spec:
    ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
    - port: 443
      targetPort: 443
      protocol: TCP
      name: https
    selector:
      app: cattle
  ---
  apiVersion: extensions/v1beta1
  kind: Ingress
  metadata:
    namespace: cattle-system
    name: cattle-ingress-http
    annotations:
      nginx.ingress.kubernetes.io/proxy-connect-timeout: "30"
      nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open
      nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open
  spec:
    rules:
    - host: <FQDN> # FQDN to access cattle server
      http:
        paths:
        - backend:
            serviceName: cattle-service
            servicePort: 80
    tls:
    - secretName: cattle-keys-ingress
      hosts:
      - <FQDN> # FQDN to access cattle server
  ---
  kind: Deployment
  apiVersion: extensions/v1beta1
  metadata:
    namespace: cattle-system
    name: cattle
  spec:
    replicas: 1
    template:
      metadata:
        labels:
          app: cattle
      spec:
        containers:
        - image: rancher/server:master
          imagePullPolicy: Always
          name: cattle-server
          ports:
          - containerPort: 80
            protocol: TCP
          - containerPort: 443
            protocol: TCP
          volumeMounts:
          - mountPath: /etc/rancher/ssl
            name: cattle-keys-volume
            readOnly: true
        volumes:
        - name: cattle-keys-volume
          secret:
            defaultMode: 420
            secretName: cattle-keys-server

View File

@@ -203,6 +203,9 @@ spec:
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
{{ range $k, $v := .ExtraArgs }}
- --{{ $k }}{{if ne $v "" }}={{ $v }}{{end}}
{{ end }}
env:
- name: POD_NAME
  valueFrom:
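To see what the `{{ range $k, $v := .ExtraArgs }}` block above expands to, here is a small self-contained Go sketch that executes an equivalent `text/template` snippet; the `data` struct is only a stand-in for the template context RKE actually passes:
```go
package main

import (
    "os"
    "text/template"
)

// Stand-in for RKE's template data; only the field the snippet uses.
type data struct {
    ExtraArgs map[string]string
}

// Same logic as the deployment template above: an empty value yields
// a bare flag, a non-empty value yields --key=value.
const snippet = `{{ range $k, $v := .ExtraArgs }}- --{{ $k }}{{ if ne $v "" }}={{ $v }}{{ end }}
{{ end }}`

func main() {
    tmpl := template.Must(template.New("extraArgs").Parse(snippet))
    d := data{ExtraArgs: map[string]string{
        "enable-ssl-passthrough": "",  // renders as: - --enable-ssl-passthrough
        "v":                      "2", // renders as: - --v=2
    }}
    if err := tmpl.Execute(os.Stdout, d); err != nil {
        panic(err)
    }
}
```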

View File

@@ -24,4 +24,4 @@ github.com/coreos/go-semver e214231b295a8ea9479f11b70b35d5acf3556d9
github.com/ugorji/go/codec ccfe18359b55b97855cee1d3f74e5efbda4869dc
github.com/rancher/norman ff60298f31f081b06d198815b4c178a578664f7d
github.com/rancher/types 574e26d2fb850f15b9269a54cacb9467505d44c5
github.com/rancher/types 32ed9ccfe5b3ffd6acff15e354c0dd713f5b88d7

View File

@@ -60,6 +60,7 @@ type ClusterSpec struct {
    GoogleKubernetesEngineConfig         *GoogleKubernetesEngineConfig         `json:"googleKubernetesEngineConfig,omitempty"`
    AzureKubernetesServiceConfig         *AzureKubernetesServiceConfig         `json:"azureKubernetesServiceConfig,omitempty"`
    RancherKubernetesEngineConfig        *RancherKubernetesEngineConfig        `json:"rancherKubernetesEngineConfig,omitempty"`
    AmazonElasticContainerServiceConfig  *AmazonElasticContainerServiceConfig  `json:"amazonElasticContainerServiceConfig,omitempty"`
    DefaultPodSecurityPolicyTemplateName string                                `json:"defaultPodSecurityPolicyTemplateName,omitempty" norman:"type=reference[podSecurityPolicyTemplate]"`
    DefaultClusterRoleForProjectMembers  string                                `json:"defaultClusterRoleForProjectMembers,omitempty" norman:"type=reference[roleTemplate]"`
}
@@ -193,6 +194,11 @@ type AzureKubernetesServiceConfig struct {
    ClientSecret string `json:"clientSecret,omitempty" norman:"required,type=password"`
}

type AmazonElasticContainerServiceConfig struct {
    AccessKey string `json:"accessKey" norman:"required"`
    SecretKey string `json:"secretKey" norman:"required,type=password"`
}

type ClusterEvent struct {
    types.Namespaced
    v1.Event

View File

@@ -43,8 +43,9 @@ var (
        PluginsDocker: "plugins/docker",
    },
    LoggingSystemImages: LoggingSystemImages{
        Fluentd:       "rancher/fluentd:v0.1.4",
        FluentdHelper: "rancher/fluentd-helper:v0.1.1",
        Fluentd:       "rancher/fluentd:v0.1.6",
        FluentdHelper: "rancher/fluentd-helper:v0.1.2",
        LogAggregatorFlexVolumeDriver: "rancher/log-aggregator:v0.1.2",
        Elaticsearch:  "rancher/docker-elasticsearch-kubernetes:5.6.2",
        Kibana:        "kibana:5.6.4",
        Busybox:       "busybox",

View File

@@ -130,4 +130,5 @@ type LoggingSystemImages struct {
    Elaticsearch                  string `json:"elaticsearch,omitempty"`
    Kibana                        string `json:"kibana,omitempty"`
    Busybox                       string `json:"busybox,omitempty"`
    LogAggregatorFlexVolumeDriver string `json:"logAggregatorFlexVolumeDriver,omitempty"`
}

View File

@@ -72,6 +72,28 @@ type NodeStatus struct {
    NodeAnnotations map[string]string `json:"nodeAnnotations,omitempty"`
    NodeLabels      map[string]string `json:"nodeLabels,omitempty"`
    NodeTaints      []v1.Taint        `json:"nodeTaints,omitempty"`
    DockerInfo      *DockerInfo       `json:"dockerInfo,omitempty"`
}

type DockerInfo struct {
    ID                 string
    Driver             string
    Debug              bool
    LoggingDriver      string
    CgroupDriver       string
    KernelVersion      string
    OperatingSystem    string
    OSType             string
    Architecture       string
    IndexServerAddress string
    DockerRootDir      string
    HTTPProxy          string
    HTTPSProxy         string
    NoProxy            string
    Name               string
    Labels             []string
    ExperimentalBuild  bool
    ServerVersion      string
}

var (

View File

@@ -250,6 +250,8 @@ type IngressConfig struct {
    Options map[string]string `yaml:"options" json:"options,omitempty"`
    // NodeSelector key pair
    NodeSelector map[string]string `yaml:"node_selector" json:"nodeSelector,omitempty"`
    // Ingress controller extra arguments
    ExtraArgs map[string]string `yaml:"extra_args" json:"extraArgs,omitempty"`
}

type RKEPlan struct {
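As a quick illustration of how the `extra_args` block in cluster.yml lands in this struct through its yaml tags, a minimal runnable sketch (trimmed-down copy of the struct; gopkg.in/yaml.v2 is assumed here as the YAML decoder):
```go
package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2"
)

// Trimmed-down copy of IngressConfig, tags as in the diff above.
type IngressConfig struct {
    Provider     string            `yaml:"provider"`
    Options      map[string]string `yaml:"options"`
    NodeSelector map[string]string `yaml:"node_selector"`
    ExtraArgs    map[string]string `yaml:"extra_args"`
}

func main() {
    doc := []byte(`
provider: nginx
extra_args:
  enable-ssl-passthrough: ""
`)
    var cfg IngressConfig
    if err := yaml.Unmarshal(doc, &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // ExtraArgs:map[enable-ssl-passthrough:]
}
```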

View File

@@ -48,6 +48,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
        in.(*AlertSystemImages).DeepCopyInto(out.(*AlertSystemImages))
        return nil
    }, InType: reflect.TypeOf(&AlertSystemImages{})},
    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
        in.(*AmazonElasticContainerServiceConfig).DeepCopyInto(out.(*AmazonElasticContainerServiceConfig))
        return nil
    }, InType: reflect.TypeOf(&AmazonElasticContainerServiceConfig{})},
    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
        in.(*AuthAppInput).DeepCopyInto(out.(*AuthAppInput))
        return nil
@@ -248,6 +252,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
        in.(*CustomConfig).DeepCopyInto(out.(*CustomConfig))
        return nil
    }, InType: reflect.TypeOf(&CustomConfig{})},
    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
        in.(*DockerInfo).DeepCopyInto(out.(*DockerInfo))
        return nil
    }, InType: reflect.TypeOf(&DockerInfo{})},
    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
        in.(*DynamicSchema).DeepCopyInto(out.(*DynamicSchema))
        return nil
@@ -1062,6 +1070,22 @@ func (in *AlertSystemImages) DeepCopy() *AlertSystemImages {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AmazonElasticContainerServiceConfig) DeepCopyInto(out *AmazonElasticContainerServiceConfig) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonElasticContainerServiceConfig.
func (in *AmazonElasticContainerServiceConfig) DeepCopy() *AmazonElasticContainerServiceConfig {
    if in == nil {
        return nil
    }
    out := new(AmazonElasticContainerServiceConfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthAppInput) DeepCopyInto(out *AuthAppInput) {
    *out = *in
@@ -2231,6 +2255,15 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
            (*in).DeepCopyInto(*out)
        }
    }
    if in.AmazonElasticContainerServiceConfig != nil {
        in, out := &in.AmazonElasticContainerServiceConfig, &out.AmazonElasticContainerServiceConfig
        if *in == nil {
            *out = nil
        } else {
            *out = new(AmazonElasticContainerServiceConfig)
            **out = **in
        }
    }
    return
}
@@ -2404,6 +2437,27 @@ func (in *CustomConfig) DeepCopy() *CustomConfig {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerInfo) DeepCopyInto(out *DockerInfo) {
    *out = *in
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerInfo.
func (in *DockerInfo) DeepCopy() *DockerInfo {
    if in == nil {
        return nil
    }
    out := new(DockerInfo)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicSchema) DeepCopyInto(out *DynamicSchema) {
    *out = *in
@@ -3162,6 +3216,13 @@ func (in *IngressConfig) DeepCopyInto(out *IngressConfig) {
            (*out)[key] = val
        }
    }
    if in.ExtraArgs != nil {
        in, out := &in.ExtraArgs, &out.ExtraArgs
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    return
}
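The generated copy above exists because `ExtraArgs` is a map, a reference type: a plain struct assignment would share it between original and copy. A hand-rolled miniature of the same logic, showing why the per-entry copy matters:
```go
package main

import "fmt"

// Miniature of the generated DeepCopy above: allocate a fresh map
// and copy entries instead of sharing the reference.
type IngressConfig struct {
    ExtraArgs map[string]string
}

func (in *IngressConfig) DeepCopy() *IngressConfig {
    if in == nil {
        return nil
    }
    out := new(IngressConfig)
    *out = *in
    if in.ExtraArgs != nil {
        out.ExtraArgs = make(map[string]string, len(in.ExtraArgs))
        for k, v := range in.ExtraArgs {
            out.ExtraArgs[k] = v
        }
    }
    return out
}

func main() {
    a := IngressConfig{ExtraArgs: map[string]string{"enable-ssl-passthrough": ""}}
    b := a.DeepCopy()
    b.ExtraArgs["v"] = "2"                          // mutate the copy...
    fmt.Println(len(a.ExtraArgs), len(b.ExtraArgs)) // ...original stays intact: 1 2
}
```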
@@ -4065,6 +4126,15 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.DockerInfo != nil {
        in, out := &in.DockerInfo, &out.DockerInfo
        if *in == nil {
            *out = nil
        } else {
            *out = new(DockerInfo)
            (*in).DeepCopyInto(*out)
        }
    }
    return
}