Increase security around cluster shell

Darren Shepherd 2020-06-02 08:51:42 -07:00
parent 3eba71d06b
commit 445acdc240
6 changed files with 104 additions and 38 deletions

View File

@@ -107,13 +107,8 @@ func (p *Factory) K8sInterface(ctx *types.APIRequest) (kubernetes.Interface, err
 	return kubernetes.NewForConfig(cfg)
 }
 
-func (p *Factory) AdminK8sInterface(ctx *types.APIRequest) (kubernetes.Interface, error) {
-	cfg, err := setupConfig(ctx, p.clientCfg, false)
-	if err != nil {
-		return nil, err
-	}
-	return kubernetes.NewForConfig(cfg)
+func (p *Factory) AdminK8sInterface() (kubernetes.Interface, error) {
+	return kubernetes.NewForConfig(p.clientCfg)
 }
 
 func (p *Factory) Client(ctx *types.APIRequest, s *types.APISchema, namespace string) (dynamic.ResourceInterface, error) {
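
The effect of this change: the admin clientset is now built only from the server's own rest.Config, so nothing carried in the API request can influence the privileged client. A minimal sketch of the hardened shape (the origin of clientCfg is an assumption for illustration):

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Sketch of the hardened factory. clientCfg is the server's own
// privileged config, loaded once at startup (for example via
// rest.InClusterConfig()); it is never rebuilt from request data.
type factory struct {
	clientCfg *rest.Config
}

// With the *types.APIRequest parameter gone, no user-supplied header or
// impersonation setting can alter how the admin client is constructed.
func (f *factory) AdminK8sInterface() (kubernetes.Interface, error) {
	return kubernetes.NewForConfig(f.clientCfg)
}

func main() {}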

View File

@@ -1,13 +1,16 @@
 package clusters
 
 import (
+	"context"
 	"net/http"
 
-	"github.com/rancher/steve/pkg/server/store/proxy"
+	"github.com/rancher/steve/pkg/clustercache"
 	"github.com/rancher/steve/pkg/schemaserver/store/empty"
 	"github.com/rancher/steve/pkg/schemaserver/types"
+	"github.com/rancher/steve/pkg/server/store/proxy"
 	"github.com/rancher/wrangler/pkg/schemas/validation"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 )
 
 const (
@@ -31,16 +34,21 @@ var (
 type Cluster struct {
 }
 
-func Register(schemas *types.APISchemas, cg proxy.ClientGetter) {
+func Register(ctx context.Context, schemas *types.APISchemas, cg proxy.ClientGetter, cluster clustercache.ClusterCache) {
+	shell := &shell{
+		cg:        cg,
+		namespace: "dashboard-shells",
+	}
+
+	cluster.OnAdd(ctx, shell.PurgeOldShell)
+	cluster.OnChange(ctx, func(gvr schema.GroupVersionResource, key string, obj, oldObj runtime.Object) error {
+		return shell.PurgeOldShell(gvr, key, obj)
+	})
+
 	schemas.MustImportAndCustomize(Cluster{}, func(schema *types.APISchema) {
 		schema.CollectionMethods = []string{http.MethodGet}
 		schema.ResourceMethods = []string{http.MethodGet}
 		schema.Store = &Store{}
-		shell := &shell{
-			cg:        cg,
-			namespace: "dashboard-shells",
-		}
 		schema.LinkHandlers = map[string]http.Handler{
 			"shell": shell,
 		}
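
Two details worth noting in the new Register: the shell handler is created once, outside the schema customization closure, and its purge routine is wired to both cache event streams. OnAdd also sees objects that already exist when the cache first syncs (standard informer behavior), so ClusterRoles orphaned by an earlier crash still get purged after a restart. OnChange has a wider signature, so it is narrowed with a closure; a sketch of that adapter pattern (handler names here are illustrative):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// addHandler matches the shape of PurgeOldShell above.
type addHandler func(gvr schema.GroupVersionResource, key string, obj runtime.Object) error

// asChangeHandler narrows a four-argument change callback to an
// add-style callback by discarding oldObj, so one purge routine can be
// registered for both OnAdd and OnChange.
func asChangeHandler(h addHandler) func(schema.GroupVersionResource, string, runtime.Object, runtime.Object) error {
	return func(gvr schema.GroupVersionResource, key string, obj, oldObj runtime.Object) error {
		return h(gvr, key, obj)
	}
}

func main() {}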

View File

@@ -7,7 +7,6 @@ import (
 	"net/http/httputil"
 	"time"
 
-	"github.com/rancher/steve/pkg/schemaserver/types"
 	"github.com/rancher/steve/pkg/server/store/proxy"
 	"github.com/rancher/wrangler/pkg/condition"
 	"github.com/rancher/wrangler/pkg/schemas/validation"
@@ -15,7 +14,10 @@ import (
 	v1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/client-go/kubernetes"
@@ -25,11 +27,50 @@ import (
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 )
 
+const (
+	roleLabel = "shell.cattle.io/cluster-role"
+)
+
 type shell struct {
 	namespace string
 	cg        proxy.ClientGetter
 }
 
+func (s *shell) PurgeOldShell(gvr schema.GroupVersionResource, key string, obj runtime.Object) error {
+	if obj == nil ||
+		gvr.Version != "v1" ||
+		gvr.Group != rbacv1.GroupName ||
+		gvr.Resource != "clusterroles" {
+		return nil
+	}
+
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		// ignore error
+		logrus.Warnf("failed to find metadata for %v, %s", gvr, key)
+		return nil
+	}
+
+	if meta.GetLabels()[roleLabel] != "true" {
+		return nil
+	}
+
+	if meta.GetCreationTimestamp().Add(time.Hour).Before(time.Now()) {
+		client, err := s.cg.AdminK8sInterface()
+		if err != nil {
+			return nil
+		}
+		name := meta.GetName()
+		go func() {
+			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+			defer cancel()
+			client.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
+		}()
+	}
+
+	return nil
+}
+
 func (s *shell) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 	ctx, user, client, err := s.contextAndClient(req)
 	if err != nil {
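
PurgeOldShell is deliberately narrow: it ignores everything except v1 ClusterRoles carrying the shell label, treats any such role older than one hour as abandoned, and deletes it in a detached goroutine with its own 20-second timeout, since cache callbacks should return quickly. The expiry test is just creation time plus TTL; a self-contained sketch:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// expired mirrors the check above: a shell ClusterRole is considered
// abandoned once its creation timestamp is more than ttl in the past.
func expired(created metav1.Time, ttl time.Duration, now time.Time) bool {
	return created.Add(ttl).Before(now)
}

func main() {
	created := metav1.NewTime(time.Now().Add(-90 * time.Minute))
	fmt.Println(expired(created, time.Hour, time.Now())) // true: 90m is past the 1h TTL
}
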
@@ -42,7 +83,12 @@ func (s *shell) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 		http.Error(rw, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	defer client.RbacV1().ClusterRoles().Delete(ctx, role.Name, metav1.DeleteOptions{})
+	defer func() {
+		// Don't use the request context, as it may already be canceled at this point
+		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+		defer cancel()
+		client.RbacV1().ClusterRoles().Delete(ctx, role.Name, metav1.DeleteOptions{})
+	}()
 
 	pod, err := s.createPod(ctx, user, role, client)
 	if err != nil {
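
The deferred cleanup needed this rework because the request context is typically already canceled by the time the defer runs (the shell session has ended and the handler is unwinding), so a Delete issued on it would fail immediately. The fix, a detached context with a bounded deadline, is a reusable pattern:

package main

import (
	"context"
	"fmt"
	"time"
)

// deleteDetached runs a cleanup call on its own short-lived context so
// it still succeeds after the parent (request) context is canceled.
func deleteDetached(del func(ctx context.Context) error) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	if err := del(ctx); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}

func main() {
	// Simulated cleanup call; in ServeHTTP above this is the
	// ClusterRoles().Delete for the per-session role.
	deleteDetached(func(ctx context.Context) error { return nil })
}
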
@@ -66,7 +112,7 @@ func (s *shell) proxyRequest(rw http.ResponseWriter, req *http.Request, pod *v1.
 		Stdout:    false,
 		Stderr:    false,
 		TTY:       false,
-		Container: "",
+		Container: "shell",
 	}, scheme.ParameterCodec).URL()
 
 	httpClient := client.CoreV1().RESTClient().(*rest.RESTClient).Client
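
With a second container in the pod, leaving Container empty is at best ambiguous (the attach may be rejected or land in the credential-holding proxy container), so the URL now pins the interactive container. The same option appears in a standard client-go attach request; a sketch:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
)

// attachURL shows where Container matters: in a multi-container pod the
// attach request must name the target container explicitly.
func attachURL(client kubernetes.Interface, namespace, pod string) string {
	return client.CoreV1().RESTClient().Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod).
		SubResource("attach").
		VersionedParams(&v1.PodAttachOptions{
			Container: "shell",
			Stdin:     true,
			Stdout:    true,
			TTY:       true,
		}, scheme.ParameterCodec).URL().String()
}

func main() {}
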
@@ -86,9 +132,7 @@ func (s *shell) proxyRequest(rw http.ResponseWriter, req *http.Request, pod *v1.
 
 func (s *shell) contextAndClient(req *http.Request) (context.Context, user.Info, kubernetes.Interface, error) {
 	ctx := req.Context()
-	apiContext := types.GetAPIContext(req.Context())
-
-	client, err := s.cg.AdminK8sInterface(apiContext)
+	client, err := s.cg.AdminK8sInterface()
 	if err != nil {
 		return ctx, nil, nil, err
 	}
@@ -122,6 +166,9 @@ func (s *shell) createRole(ctx context.Context, user user.Info, client kubernete
 	return client.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "dashboard-shell-",
+			Labels: map[string]string{
+				roleLabel: "true",
+			},
 		},
 		Rules: []rbacv1.PolicyRule{
 			{
@@ -260,19 +307,16 @@ func (s *shell) createPod(ctx context.Context, user user.Info, role *rbacv1.Clus
 		return nil, err
 	}
 
 	hour := int64(15)
 	zero := int64(0)
+	t := true
 	pod, err := client.CoreV1().Pods(s.namespace).Create(ctx, &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "dashboard-shell-",
 			Namespace:    s.namespace,
-			Labels: map[string]string{
-				"clusterrolename": role.Name,
-				"clusterroleuid":  string(role.UID),
-			},
+			OwnerReferences: ref(role),
 		},
 		Spec: v1.PodSpec{
 			ActiveDeadlineSeconds:         &hour,
 			TerminationGracePeriodSeconds: &zero,
 			Volumes: []v1.Volume{
 				{
 					Name: "config",
@@ -289,13 +333,21 @@ func (s *shell) createPod(ctx context.Context, user user.Info, role *rbacv1.Clus
 			ServiceAccountName: sa.Name,
 			Containers: []v1.Container{
 				{
-					Name:            "shell",
-					TTY:             true,
-					Stdin:           true,
-					StdinOnce:       true,
-					Image:           "rancher/rancher-agent:v2.4.3",
+					Name:            "proxy",
+					Image:           "ibuildthecloud/shell:v0.0.1",
 					ImagePullPolicy: v1.PullIfNotPresent,
-					Command:         []string{"bash"},
+					Env: []v1.EnvVar{
+						{
+							Name:  "KUBECONFIG",
+							Value: "/root/.kube/config",
+						},
+					},
+					Command: []string{"kubectl", "proxy"},
+					SecurityContext: &v1.SecurityContext{
+						RunAsUser:              &zero,
+						RunAsGroup:             &zero,
+						ReadOnlyRootFilesystem: &t,
+					},
 					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: "config",
@@ -305,6 +357,15 @@ func (s *shell) createPod(ctx context.Context, user user.Info, role *rbacv1.Clus
 						},
 					},
 				},
+				{
+					Name:            "shell",
+					TTY:             true,
+					Stdin:           true,
+					StdinOnce:       true,
+					Image:           "ibuildthecloud/shell:v0.0.1",
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Command:         []string{"bash"},
+				},
 			},
 		},
 	}, metav1.CreateOptions{})
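
The container split is the heart of the commit: the privileged kubeconfig is mounted only into the proxy sidecar, which runs kubectl proxy as root on a read-only filesystem, while the interactive shell container keeps TTY/stdin but holds no credentials at all. Because containers in a pod share a network namespace, the shell can reach the API through the sidecar's loopback listener; 127.0.0.1:8001 below is kubectl proxy's default, not something stated in this diff. A hedged sketch of a client built from inside the shell container:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// No credentials needed here: the proxy sidecar authenticates with
	// the mounted kubeconfig; the shell talks plain HTTP over loopback.
	client, err := kubernetes.NewForConfig(&rest.Config{Host: "http://127.0.0.1:8001"})
	if err != nil {
		panic(err)
	}
	pods, err := client.CoreV1().Pods("dashboard-shells").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods visible through the proxy:", len(pods.Items))
}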

View File

@@ -1,6 +1,8 @@
 package resources
 
 import (
+	"context"
+
 	"github.com/rancher/steve/pkg/accesscontrol"
 	"github.com/rancher/steve/pkg/client"
 	"github.com/rancher/steve/pkg/clustercache"
@@ -17,12 +19,12 @@ import (
 	"k8s.io/client-go/discovery"
 )
 
-func DefaultSchemas(baseSchema *types.APISchemas, ccache clustercache.ClusterCache, cg proxy.ClientGetter) *types.APISchemas {
+func DefaultSchemas(ctx context.Context, baseSchema *types.APISchemas, ccache clustercache.ClusterCache, cg proxy.ClientGetter) *types.APISchemas {
 	counts.Register(baseSchema, ccache)
 	subscribe.Register(baseSchema)
 	apiroot.Register(baseSchema, []string{"v1"}, []string{"proxy:/apis"})
 	userpreferences.Register(baseSchema, cg)
-	clusters.Register(baseSchema, cg)
+	clusters.Register(ctx, baseSchema, cg, ccache)
 	return baseSchema
 }

View File

@@ -67,7 +67,7 @@ func setup(ctx context.Context, server *Server) (http.Handler, *schema.Collectio
 	ccache := clustercache.NewClusterCache(ctx, cf.DynamicClient())
 
-	server.BaseSchemas = resources.DefaultSchemas(server.BaseSchemas, ccache, cf)
+	server.BaseSchemas = resources.DefaultSchemas(ctx, server.BaseSchemas, ccache, cf)
 	server.SchemaTemplates = append(server.SchemaTemplates, resources.DefaultSchemaTemplates(cf, asl, server.K8s.Discovery())...)
 
 	cols, err := common.NewDynamicColumns(server.RestConfig)

View File

@@ -34,7 +34,7 @@
 type ClientGetter interface {
 	IsImpersonating() bool
 	K8sInterface(ctx *types.APIRequest) (kubernetes.Interface, error)
-	AdminK8sInterface(ctx *types.APIRequest) (kubernetes.Interface, error)
+	AdminK8sInterface() (kubernetes.Interface, error)
 	Client(ctx *types.APIRequest, schema *types.APISchema, namespace string) (dynamic.ResourceInterface, error)
 	AdminClient(ctx *types.APIRequest, schema *types.APISchema, namespace string) (dynamic.ResourceInterface, error)
 	TableClient(ctx *types.APIRequest, schema *types.APISchema, namespace string) (dynamic.ResourceInterface, error)
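
The interface change ripples to every ClientGetter implementation, but it also makes privileged call sites easier to audit and to fake in tests, since AdminK8sInterface now has no inputs a caller could influence. A minimal test double using the fake clientset from client-go (the remaining interface methods are omitted from this sketch):

package main

import (
	"k8s.io/client-go/kubernetes"
	fake "k8s.io/client-go/kubernetes/fake"
)

// fakeGetter satisfies just the narrowed method; a real implementation
// would also provide the other ClientGetter methods.
type fakeGetter struct{}

func (fakeGetter) AdminK8sInterface() (kubernetes.Interface, error) {
	return fake.NewSimpleClientset(), nil
}

func main() {}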