Merge pull request #130738 from ritazh/dra-user-rbac

DRA: add user rbac
Commit b29c2f5343 by Kubernetes Prow Robot, 2025-03-14 19:09:46 -07:00, committed by GitHub
3 changed files with 1585 additions and 62 deletions
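This change factors the rules of the system:aggregate-to-edit and system:aggregate-to-view cluster roles out of ClusterRoles() into two new helpers, editRules() and viewRules(). When the DynamicResourceAllocation feature gate is enabled, viewRules() appends read access to resourceclaims, resourceclaims/status, and resourceclaimtemplates in the resource API group, and editRules() additionally appends write access to resourceclaims and resourceclaimtemplates (the status subresource stays read-only). A new test, TestBootstrapClusterRolesWithFeatureGatesEnabled, renders the bootstrap cluster roles with the AllAlpha and AllBeta gates enabled and compares them against a new cluster-roles-featuregates.yaml fixture.

As a rough illustration (not part of the diff), the rule appended by viewRules() expands to a plain rbacv1.PolicyRule along these lines, assuming the usual bootstrappolicy conventions that Read covers the get/list/watch verbs and that resourceGroup is the resource.k8s.io API group:

	package example

	import rbacv1 "k8s.io/api/rbac/v1"

	// draViewRule sketches the read rule appended by viewRules() when the
	// DynamicResourceAllocation feature gate is on. The verbs and API group
	// are assumptions (bootstrappolicy's Read helper and resourceGroup
	// constant), not copied from this diff.
	var draViewRule = rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{"resource.k8s.io"},
		Resources: []string{"resourceclaims", "resourceclaims/status", "resourceclaimtemplates"},
	}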


@@ -108,6 +108,83 @@ func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) {
 	return
 }
 
+func viewRules() []rbacv1.PolicyRule {
+	rules := []rbacv1.PolicyRule{
+		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
+			"services", "services/status", "endpoints", "persistentvolumeclaims", "persistentvolumeclaims/status", "configmaps").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
+			"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
+		// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
+		// indicator of which namespaces you have access to.
+		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources(
+			"controllerrevisions",
+			"statefulsets", "statefulsets/status", "statefulsets/scale",
+			"daemonsets", "daemonsets/status",
+			"deployments", "deployments/status", "deployments/scale",
+			"replicasets", "replicasets/status", "replicasets/scale").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers", "horizontalpodautoscalers/status").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "cronjobs/status", "jobs/status").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status",
+			"ingresses", "ingresses/status", "replicasets", "replicasets/scale", "replicasets/status", "replicationcontrollers/scale",
+			"networkpolicies").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets", "poddisruptionbudgets/status").RuleOrDie(),
+		rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies", "ingresses", "ingresses/status").RuleOrDie(),
+	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+		rules = append(rules, rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclaims/status", "resourceclaimtemplates").RuleOrDie())
+	}
+	return rules
+}
+
+func editRules() []rbacv1.PolicyRule {
+	rules := []rbacv1.PolicyRule{
+		// Allow read on escalating resources
+		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods/attach", "pods/proxy", "pods/exec", "pods/portforward", "secrets", "services/proxy").RuleOrDie(),
+		rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
+		rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
+			"services", "services/proxy", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(),
+		rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(appsGroup).Resources(
+			"statefulsets", "statefulsets/scale",
+			"daemonsets",
+			"deployments", "deployments/scale", "deployments/rollback",
+			"replicasets", "replicasets/scale").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(extensionsGroup).Resources("daemonsets",
+			"deployments", "deployments/scale", "deployments/rollback", "ingresses",
+			"replicasets", "replicasets/scale", "replicationcontrollers/scale",
+			"networkpolicies").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
+		rbacv1helpers.NewRule(Write...).Groups(networkingGroup).Resources("networkpolicies", "ingresses").RuleOrDie(),
+		rbacv1helpers.NewRule(ReadWrite...).Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+		rules = append(rules, rbacv1helpers.NewRule(Write...).Groups(resourceGroup).Resources("resourceclaims", "resourceclaimtemplates").RuleOrDie())
+	}
+	return rules
+}
+
 // NodeRules returns node policy rules, it is slice of rbacv1.PolicyRule.
 func NodeRules() []rbacv1.PolicyRule {
 	nodePolicyRules := []rbacv1.PolicyRule{
@@ -313,73 +390,13 @@ func ClusterRoles() []rbacv1.ClusterRole {
 			// It does not grant powers for "privileged" resources which are domain of the system: `/status`
 			// subresources or `quota`/`limits` which are used to control namespaces
 			ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
-			Rules: []rbacv1.PolicyRule{
-				// Allow read on escalating resources
-				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods/attach", "pods/proxy", "pods/exec", "pods/portforward", "secrets", "services/proxy").RuleOrDie(),
-				rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
-				rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
-					"services", "services/proxy", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(),
-				rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(appsGroup).Resources(
-					"statefulsets", "statefulsets/scale",
-					"daemonsets",
-					"deployments", "deployments/scale", "deployments/rollback",
-					"replicasets", "replicasets/scale").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(extensionsGroup).Resources("daemonsets",
-					"deployments", "deployments/scale", "deployments/rollback", "ingresses",
-					"replicasets", "replicasets/scale", "replicationcontrollers/scale",
-					"networkpolicies").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
-				rbacv1helpers.NewRule(Write...).Groups(networkingGroup).Resources("networkpolicies", "ingresses").RuleOrDie(),
-				rbacv1helpers.NewRule(ReadWrite...).Groups(coordinationGroup).Resources("leases").RuleOrDie(),
-			},
+			Rules: editRules(),
 		},
 		{
 			// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
 			// a namespace.
 			ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
-			Rules: []rbacv1.PolicyRule{
-				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
-					"services", "services/status", "endpoints", "persistentvolumeclaims", "persistentvolumeclaims/status", "configmaps").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
-					"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
-				// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
-				// indicator of which namespaces you have access to.
-				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources(
-					"controllerrevisions",
-					"statefulsets", "statefulsets/status", "statefulsets/scale",
-					"daemonsets", "daemonsets/status",
-					"deployments", "deployments/status", "deployments/scale",
-					"replicasets", "replicasets/status", "replicasets/scale").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers", "horizontalpodautoscalers/status").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "cronjobs/status", "jobs/status").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status",
-					"ingresses", "ingresses/status", "replicasets", "replicasets/scale", "replicasets/status", "replicationcontrollers/scale",
-					"networkpolicies").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets", "poddisruptionbudgets/status").RuleOrDie(),
-				rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies", "ingresses", "ingresses/status").RuleOrDie(),
-			},
+			Rules: viewRules(),
 		},
 		{
 			// a role to use for heapster's connections back to the API server
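
The built-in edit and view roles do not list these rules directly; they are aggregated ClusterRoles whose aggregation rules select roles carrying the rbac.authorization.k8s.io/aggregate-to-edit and aggregate-to-view labels shown above, so subjects bound to edit or view pick up the DRA permissions automatically once the feature gate is on. A minimal sketch of that mechanism, using field names from k8s.io/api/rbac/v1 rather than anything in this diff:

	package example

	import (
		rbacv1 "k8s.io/api/rbac/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// editRole sketches how an aggregated ClusterRole such as the built-in
	// "edit" role collects rules: the controller-manager copies in the rules
	// of every ClusterRole matching the selectors, including the
	// system:aggregate-to-edit role defined above.
	var editRole = rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "edit"},
		AggregationRule: &rbacv1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
			},
		},
	}

The second changed file updates the bootstrap policy unit tests: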


@@ -31,6 +31,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/component-helpers/auth/rbac/validation"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"
@@ -175,6 +177,26 @@ func TestBootstrapClusterRoles(t *testing.T) {
 	testObjects(t, list, "cluster-roles.yaml")
 }
 
+func TestBootstrapClusterRolesWithFeatureGatesEnabled(t *testing.T) {
+	featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, "AllAlpha", true)
+	featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, "AllBeta", true)
+	bootstrapRoles := bootstrappolicy.ClusterRoles()
+	featureGateList := &api.List{}
+	featureGateNames := sets.NewString()
+	featureGateRoles := map[string]runtime.Object{}
+	for i := range bootstrapRoles {
+		role := bootstrapRoles[i]
+		featureGateNames.Insert(role.Name)
+		featureGateRoles[role.Name] = &role
+	}
+	for _, featureGateName := range featureGateNames.List() {
+		featureGateList.Items = append(featureGateList.Items, featureGateRoles[featureGateName])
+	}
+	testObjects(t, featureGateList, "cluster-roles-featuregates.yaml")
+}
+
 func TestBootstrapClusterRoleBindings(t *testing.T) {
 	list := &api.List{}
 	names := sets.NewString()
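
The new test turns on every alpha and beta feature gate so that gated rules, including the DRA ones, are captured in the cluster-roles-featuregates.yaml fixture. A narrower companion check is sketched below; it assumes the DRA gate is features.DynamicResourceAllocation from k8s.io/kubernetes/pkg/features (an existing gate, not something this PR adds) and reuses the imports of the test file above:

	// Hypothetical companion test, not part of this PR. It reuses the
	// feature, featuregatetesting, and bootstrappolicy imports from the
	// test file above, plus "k8s.io/kubernetes/pkg/features".
	func TestAggregateToViewIncludesDRA(t *testing.T) {
		featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)

		for _, role := range bootstrappolicy.ClusterRoles() {
			if role.Name != "system:aggregate-to-view" {
				continue
			}
			for _, rule := range role.Rules {
				for _, resource := range rule.Resources {
					if resource == "resourceclaims" {
						return // the DRA read rule is present
					}
				}
			}
			t.Fatal("system:aggregate-to-view is missing resourceclaims with DynamicResourceAllocation enabled")
		}
		t.Fatal("system:aggregate-to-view role not found")
	}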

File diff suppressed because it is too large (presumably the generated cluster-roles-featuregates.yaml test fixture referenced by the new test above).