K-EXPLORER: Merge branch 'master' bf2e9655f5 into ke/v0.3

niusmallnan
2023-07-11 11:29:51 +08:00
13 changed files with 2387 additions and 97 deletions


@@ -35,7 +35,7 @@ steps:
- name: validate
image: registry.suse.com/bci/bci-base:15.4
commands:
- zypper in -y go=1.19 git tar gzip make
- zypper in -y "golang(API)=1.19" git tar gzip make
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.49.0
- mv ./bin/golangci-lint /usr/local/bin/golangci-lint
- GOBIN=/usr/local/bin go install github.com/golang/mock/mockgen@v1.6.0

.github/renovate.json vendored Normal file

@@ -0,0 +1,9 @@
{
"extends": [
"github>rancher/renovate-config#release"
],
"baseBranches": [
"master"
],
"prHourlyLimit": 2
}

.github/workflows/renovate.yml vendored Normal file

@@ -0,0 +1,25 @@
name: Renovate
on:
workflow_dispatch:
inputs:
logLevel:
description: "Override default log level"
required: false
default: "info"
type: string
overrideSchedule:
description: "Override all schedules"
required: false
default: "false"
type: string
# Run twice in the early morning (UTC) for initial and follow-up steps (create pull request and merge)
schedule:
- cron: '30 4,6 * * *'
jobs:
call-workflow:
uses: rancher/renovate-config/.github/workflows/renovate.yml@release
with:
logLevel: ${{ inputs.logLevel || 'info' }}
overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }}
secrets: inherit


@@ -97,7 +97,13 @@ Example, filtering by object name:
/v1/{type}?filter=metadata.name=foo
```
Filters are ANDed together, so an object must match all filters to be
One filter can list multiple possible fields to match; these are ORed together:
```
/v1/{type}?filter=metadata.name=foo,metadata.namespace=foo
```
Stacked filters are ANDed together, so an object must match all filters to be
included in the list.
```
@@ -111,6 +117,36 @@ item is included in the list.
/v1/{type}?filter=spec.containers.image=alpine
```
#### `projectsornamespaces`
Resources can also be filtered by the Rancher projects their namespaces belong
to. Since a project isn't an intrinsic part of the resource itself, the filter
parameter for filtering by projects is separate from the main `filter`
parameter. This query parameter is only applicable when steve is running in
concert with Rancher.
The list can be filtered by projects, namespaces, or both.
Filtering by a single project or a single namespace:
```
/v1/{type}?projectsornamespaces=p1
```
Filtering by multiple projects or namespaces is done with a comma-separated
list. A resource matching any project or namespace in the list is included in
the result:
```
/v1/{type}?projectsornamespaces=p1,n1,n2
```
The list can be negated to exclude results:
```
/v1/{type}?projectsornamespaces!=p1,n1,n2
```
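The `filter` and `projectsornamespaces` parameters are ordinary query parameters, so they can be combined in a single request. As a purely illustrative example (the field values and project name here are hypothetical), a query for objects whose name matches `foo` or `bar`, restricted to project or namespace `p1`, could look like:
```
/v1/{type}?filter=metadata.name=foo,metadata.name=bar&projectsornamespaces=p1
```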
#### `sort`
Only applicable to list requests (`/v1/{type}` and `/v1/{type}/{namespace}`).


@@ -11,6 +11,7 @@ import (
"github.com/rancher/steve/pkg/stores/proxy"
"github.com/rancher/steve/pkg/summarycache"
"github.com/rancher/wrangler/pkg/data"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/slice"
"github.com/rancher/wrangler/pkg/summary"
"k8s.io/apimachinery/pkg/api/meta"
@@ -21,9 +22,10 @@ import (
func DefaultTemplate(clientGetter proxy.ClientGetter,
summaryCache *summarycache.SummaryCache,
asl accesscontrol.AccessSetLookup) schema.Template {
asl accesscontrol.AccessSetLookup,
namespaceCache corecontrollers.NamespaceCache) schema.Template {
return schema.Template{
Store: metricsStore.NewMetricsStore(proxy.NewProxyStore(clientGetter, summaryCache, asl)),
Store: metricsStore.NewMetricsStore(proxy.NewProxyStore(clientGetter, summaryCache, asl, namespaceCache)),
Formatter: formatter(summaryCache),
}
}


@@ -19,6 +19,7 @@ import (
steveschema "github.com/rancher/steve/pkg/schema"
"github.com/rancher/steve/pkg/stores/proxy"
"github.com/rancher/steve/pkg/summarycache"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/client-go/discovery"
)
@@ -46,9 +47,10 @@ func DefaultSchemaTemplates(cf *client.Factory,
baseSchemas *types.APISchemas,
summaryCache *summarycache.SummaryCache,
lookup accesscontrol.AccessSetLookup,
discovery discovery.DiscoveryInterface) []schema.Template {
discovery discovery.DiscoveryInterface,
namespaceCache corecontrollers.NamespaceCache) []schema.Template {
return []schema.Template{
common.DefaultTemplate(cf, summaryCache, lookup),
common.DefaultTemplate(cf, summaryCache, lookup, namespaceCache),
apigroups.Template(discovery),
{
ID: "configmap",


@@ -145,7 +145,7 @@ func setup(ctx context.Context, server *Server) error {
summaryCache := summarycache.New(sf, ccache)
summaryCache.Start(ctx)
for _, template := range resources.DefaultSchemaTemplates(cf, server.BaseSchemas, summaryCache, asl, server.controllers.K8s.Discovery()) {
for _, template := range resources.DefaultSchemaTemplates(cf, server.BaseSchemas, summaryCache, asl, server.controllers.K8s.Discovery(), server.controllers.Core.Namespace().Cache()) {
sf.AddTemplate(template)
}


@@ -2,6 +2,7 @@
package listprocessor
import (
"regexp"
"sort"
"strconv"
"strings"
@@ -9,28 +10,44 @@ import (
"github.com/rancher/apiserver/pkg/types"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/data/convert"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
defaultLimit = 100000
continueParam = "continue"
limitParam = "limit"
filterParam = "filter"
sortParam = "sort"
pageSizeParam = "pagesize"
pageParam = "page"
revisionParam = "revision"
defaultLimit = 100000
continueParam = "continue"
limitParam = "limit"
filterParam = "filter"
sortParam = "sort"
pageSizeParam = "pagesize"
pageParam = "page"
revisionParam = "revision"
projectsOrNamespacesVar = "projectsornamespaces"
projectIDFieldLabel = "field.cattle.io/projectId"
orOp = ","
notOp = "!"
)
var opReg = regexp.MustCompile(`[!]?=`)
type op string
const (
eq op = ""
notEq op = "!="
)
// ListOptions represents the query parameters that may be included in a list request.
type ListOptions struct {
ChunkSize int
Resume string
Filters []Filter
Sort Sort
Pagination Pagination
Revision string
ChunkSize int
Resume string
Filters []OrFilter
Sort Sort
Pagination Pagination
Revision string
ProjectsOrNamespaces ProjectsOrNamespacesFilter
}
// Filter represents a field to filter by.
@@ -39,6 +56,7 @@ type ListOptions struct {
type Filter struct {
field []string
match string
op op
}
// String returns the filter as a query string.
@@ -47,6 +65,25 @@ func (f Filter) String() string {
return field + "=" + f.match
}
// OrFilter represents a set of possible fields to filter by, where an item may match any filter in the set to be included in the result.
type OrFilter struct {
filters []Filter
}
// String returns the filter as a query string.
func (f OrFilter) String() string {
var fields strings.Builder
for i, field := range f.filters {
fields.WriteString(strings.Join(field.field, "."))
fields.WriteByte('=')
fields.WriteString(field.match)
if i < len(f.filters)-1 {
fields.WriteByte(',')
}
}
return fields.String()
}
// SortOrder represents whether the list should be ascending or descending.
type SortOrder int
@@ -96,26 +133,59 @@ func (p Pagination) PageSize() int {
return p.pageSize
}
type ProjectsOrNamespacesFilter struct {
filter map[string]struct{}
op op
}
// ParseQuery parses the query params of a request and returns a ListOptions.
func ParseQuery(apiOp *types.APIRequest) *ListOptions {
chunkSize := getLimit(apiOp)
opts := ListOptions{}
opts.ChunkSize = getLimit(apiOp)
q := apiOp.Request.URL.Query()
cont := q.Get(continueParam)
opts.Resume = cont
filterParams := q[filterParam]
filterOpts := []Filter{}
filterOpts := []OrFilter{}
for _, filters := range filterParams {
filter := strings.Split(filters, "=")
if len(filter) != 2 {
continue
orFilters := strings.Split(filters, orOp)
orFilter := OrFilter{}
for _, filter := range orFilters {
var op op
if strings.Contains(filter, "!=") {
op = "!="
}
filter := opReg.Split(filter, -1)
if len(filter) != 2 {
continue
}
orFilter.filters = append(orFilter.filters, Filter{field: strings.Split(filter[0], "."), match: filter[1], op: op})
}
filterOpts = append(filterOpts, Filter{field: strings.Split(filter[0], "."), match: filter[1]})
filterOpts = append(filterOpts, orFilter)
}
// sort the filter fields so they can be used as a cache key in the store
for _, orFilter := range filterOpts {
sort.Slice(orFilter.filters, func(i, j int) bool {
fieldI := strings.Join(orFilter.filters[i].field, ".")
fieldJ := strings.Join(orFilter.filters[j].field, ".")
return fieldI < fieldJ
})
}
sort.Slice(filterOpts, func(i, j int) bool {
fieldI := strings.Join(filterOpts[i].field, ".")
fieldJ := strings.Join(filterOpts[j].field, ".")
return fieldI < fieldJ
var fieldI, fieldJ strings.Builder
for _, f := range filterOpts[i].filters {
fieldI.WriteString(strings.Join(f.field, "."))
}
for _, f := range filterOpts[j].filters {
fieldJ.WriteString(strings.Join(f.field, "."))
}
return fieldI.String() < fieldJ.String()
})
opts.Filters = filterOpts
sortOpts := Sort{}
sortKeys := q.Get(sortParam)
if sortKeys != "" {
@@ -139,6 +209,8 @@ func ParseQuery(apiOp *types.APIRequest) *ListOptions {
}
}
}
opts.Sort = sortOpts
var err error
pagination := Pagination{}
pagination.pageSize, err = strconv.Atoi(q.Get(pageSizeParam))
@@ -149,15 +221,29 @@ func ParseQuery(apiOp *types.APIRequest) *ListOptions {
if err != nil {
pagination.page = 1
}
opts.Pagination = pagination
revision := q.Get(revisionParam)
return &ListOptions{
ChunkSize: chunkSize,
Resume: cont,
Filters: filterOpts,
Sort: sortOpts,
Pagination: pagination,
Revision: revision,
opts.Revision = revision
projectsOptions := ProjectsOrNamespacesFilter{}
var op op
projectsOrNamespaces := q.Get(projectsOrNamespacesVar)
if projectsOrNamespaces == "" {
projectsOrNamespaces = q.Get(projectsOrNamespacesVar + notOp)
if projectsOrNamespaces != "" {
op = notEq
}
}
if projectsOrNamespaces != "" {
projectsOptions.filter = make(map[string]struct{})
for _, pn := range strings.Split(projectsOrNamespaces, ",") {
projectsOptions.filter[pn] = struct{}{}
}
projectsOptions.op = op
opts.ProjectsOrNamespaces = projectsOptions
}
return &opts
}
// getLimit extracts the limit parameter from the request or sets a default of 100000.
@@ -174,7 +260,7 @@ func getLimit(apiOp *types.APIRequest) int {
// FilterList accepts a channel of unstructured objects and a slice of filters and returns the filtered list.
// Filters are ANDed together.
func FilterList(list <-chan []unstructured.Unstructured, filters []Filter) []unstructured.Unstructured {
func FilterList(list <-chan []unstructured.Unstructured, filters []OrFilter) []unstructured.Unstructured {
result := []unstructured.Unstructured{}
for items := range list {
for _, item := range items {
@@ -214,15 +300,15 @@ func matchesOne(obj map[string]interface{}, filter Filter) bool {
return true
}
case []interface{}:
filter = Filter{field: subField, match: filter.match}
if matchesAny(typedVal, filter) {
filter = Filter{field: subField, match: filter.match, op: filter.op}
if matchesOneInList(typedVal, filter) {
return true
}
}
return false
}
func matchesAny(obj []interface{}, filter Filter) bool {
func matchesOneInList(obj []interface{}, filter Filter) bool {
for _, v := range obj {
switch typedItem := v.(type) {
case string, int, bool:
@@ -235,7 +321,7 @@ func matchesAny(obj []interface{}, filter Filter) bool {
return true
}
case []interface{}:
if matchesAny(typedItem, filter) {
if matchesOneInList(typedItem, filter) {
return true
}
}
@@ -243,9 +329,19 @@ func matchesAny(obj []interface{}, filter Filter) bool {
return false
}
func matchesAll(obj map[string]interface{}, filters []Filter) bool {
func matchesAny(obj map[string]interface{}, filter OrFilter) bool {
for _, f := range filter.filters {
matches := matchesOne(obj, f)
if (matches && f.op == eq) || (!matches && f.op == notEq) {
return true
}
}
return false
}
func matchesAll(obj map[string]interface{}, filters []OrFilter) bool {
for _, f := range filters {
if !matchesOne(obj, f) {
if !matchesAny(obj, f) {
return false
}
}
@@ -298,3 +394,31 @@ func PaginateList(list []unstructured.Unstructured, p Pagination) ([]unstructure
}
return list[offset : offset+p.pageSize], pages
}
func FilterByProjectsAndNamespaces(list []unstructured.Unstructured, projectsOrNamespaces ProjectsOrNamespacesFilter, namespaceCache corecontrollers.NamespaceCache) []unstructured.Unstructured {
if len(projectsOrNamespaces.filter) == 0 {
return list
}
result := []unstructured.Unstructured{}
for _, obj := range list {
namespaceName := obj.GetNamespace()
if namespaceName == "" {
continue
}
namespace, err := namespaceCache.Get(namespaceName)
if namespace == nil || err != nil {
continue
}
projectLabel, _ := namespace.GetLabels()[projectIDFieldLabel]
_, matchesProject := projectsOrNamespaces.filter[projectLabel]
_, matchesNamespace := projectsOrNamespaces.filter[namespaceName]
matches := matchesProject || matchesNamespace
if projectsOrNamespaces.op == eq && matches {
result = append(result, obj)
}
if projectsOrNamespaces.op == notEq && !matches {
result = append(result, obj)
}
}
return result
}
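To make the new listprocessor pieces above easier to follow, here is a minimal, self-contained sketch of how they compose. It is not part of the commit: the request URL, field names, and object contents are invented for illustration, and only `ParseQuery` and `FilterList` from the diff are exercised.
```go
package main

import (
	"fmt"
	"net/http"

	"github.com/rancher/apiserver/pkg/types"
	"github.com/rancher/steve/pkg/stores/partition/listprocessor"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Two filter parameters: the first ORs two fields (comma), the second is a
	// negation (!=). The two parameters are ANDed together by FilterList.
	req, _ := http.NewRequest("GET",
		"/v1/pods?filter=metadata.name=foo,metadata.namespace=foo&filter=metadata.labels.tier!=backend", nil)
	opts := listprocessor.ParseQuery(&types.APIRequest{Request: req})

	// FilterList consumes chunks of objects from a channel and keeps only the
	// objects that satisfy every OrFilter.
	ch := make(chan []unstructured.Unstructured, 1)
	ch <- []unstructured.Unstructured{
		{Object: map[string]interface{}{
			"metadata": map[string]interface{}{"name": "foo", "labels": map[string]interface{}{"tier": "frontend"}},
		}},
		{Object: map[string]interface{}{
			"metadata": map[string]interface{}{"name": "baz", "labels": map[string]interface{}{"tier": "backend"}},
		}},
	}
	close(ch)

	filtered := listprocessor.FilterList(ch, opts.Filters)
	fmt.Println(len(filtered)) // 1: only the object named "foo" passes both filter parameters
}
```
`FilterByProjectsAndNamespaces` is applied separately by the partition store (see the store.go hunk below) because it needs a `NamespaceCache` to resolve project labels on namespaces.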

File diff suppressed because it is too large


@@ -13,6 +13,7 @@ import (
"github.com/rancher/apiserver/pkg/types"
"github.com/rancher/steve/pkg/accesscontrol"
"github.com/rancher/steve/pkg/stores/partition/listprocessor"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"k8s.io/apimachinery/pkg/api/meta"
@@ -29,7 +30,7 @@ const (
// Not related to the total size in memory of the cache, as any item could take any amount of memory.
cacheSizeEnv = "CATTLE_REQUEST_CACHE_SIZE_INT"
defaultCacheSize = 1000
// Set to non-empty to disable list request caching entirely.
// Set to "false" to enable list request caching.
cacheDisableEnv = "CATTLE_REQUEST_CACHE_DISABLED"
)
@@ -42,13 +43,14 @@ type Partitioner interface {
// Store implements types.Store for partitions.
type Store struct {
Partitioner Partitioner
listCache *cache.LRUExpireCache
asl accesscontrol.AccessSetLookup
Partitioner Partitioner
listCache *cache.LRUExpireCache
asl accesscontrol.AccessSetLookup
namespaceCache corecontrollers.NamespaceCache
}
// NewStore creates a types.Store implementation with a partitioner and an LRU expiring cache for list responses.
func NewStore(partitioner Partitioner, asl accesscontrol.AccessSetLookup) *Store {
func NewStore(partitioner Partitioner, asl accesscontrol.AccessSetLookup, namespaceCache corecontrollers.NamespaceCache) *Store {
cacheSize := defaultCacheSize
if v := os.Getenv(cacheSizeEnv); v != "" {
sizeInt, err := strconv.Atoi(v)
@@ -57,10 +59,11 @@ func NewStore(partitioner Partitioner, asl accesscontrol.AccessSetLookup) *Store
}
}
s := &Store{
Partitioner: partitioner,
asl: asl,
Partitioner: partitioner,
asl: asl,
namespaceCache: namespaceCache,
}
if v := os.Getenv(cacheDisableEnv); v == "" {
if v := os.Getenv(cacheDisableEnv); v == "false" {
s.listCache = cache.NewLRUExpireCache(cacheSize)
}
return s
@@ -203,6 +206,7 @@ func (s *Store) List(apiOp *types.APIRequest, schema *types.APISchema) (types.AP
listToCache := &unstructured.UnstructuredList{
Items: list,
}
list = listprocessor.FilterByProjectsAndNamespaces(list, opts.ProjectsOrNamespaces, s.namespaceCache)
c := lister.Continue()
if c != "" {
listToCache.SetContinue(c)
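One behavioral detail worth noting from the hunks above: list-response caching is now opt-in rather than opt-out. A tiny sketch of the environment wiring, using the variable names from the diff (the surrounding program is illustrative only):
```go
package main

import "os"

func main() {
	// After this change the LRU list cache is created only when the
	// (counterintuitively named) disable variable is exactly "false";
	// leaving it unset keeps list caching off.
	os.Setenv("CATTLE_REQUEST_CACHE_DISABLED", "false")

	// Optional: override the default cache size of 1000 entries.
	os.Setenv("CATTLE_REQUEST_CACHE_SIZE_INT", "500")

	// NewStore reads both variables when the partition store is constructed,
	// so they must be set before it is called.
}
```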


@@ -12,9 +12,13 @@ import (
"github.com/rancher/apiserver/pkg/types"
"github.com/rancher/steve/pkg/accesscontrol"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/schemas"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request"
@@ -278,6 +282,12 @@ func TestList(t *testing.T) {
apiOps: []*types.APIRequest{
newRequest("filter=data.color=green", "user1"),
newRequest("filter=data.color=green&filter=metadata.name=bramley", "user1"),
newRequest("filter=data.color=green,data.color=pink", "user1"),
newRequest("filter=data.color=green,data.color=pink&filter=metadata.name=fuji", "user1"),
newRequest("filter=data.color=green,data.color=pink&filter=metadata.name=crispin", "user1"),
newRequest("filter=data.color!=green", "user1"),
newRequest("filter=data.color!=green,metadata.name=granny-smith", "user1"),
newRequest("filter=data.color!=green&filter=metadata.name!=crispin", "user1"),
},
access: []map[string]string{
{
@@ -286,6 +296,24 @@ func TestList(t *testing.T) {
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
},
partitions: map[string][]Partition{
"user1": {
@@ -318,6 +346,44 @@ func TestList(t *testing.T) {
newApple("bramley").toObj(),
},
},
{
Count: 3,
Objects: []types.APIObject{
newApple("fuji").toObj(),
newApple("granny-smith").toObj(),
newApple("bramley").toObj(),
},
},
{
Count: 1,
Objects: []types.APIObject{
newApple("fuji").toObj(),
},
},
{
Count: 0,
},
{
Count: 2,
Objects: []types.APIObject{
newApple("fuji").toObj(),
newApple("crispin").toObj(),
},
},
{
Count: 3,
Objects: []types.APIObject{
newApple("fuji").toObj(),
newApple("granny-smith").toObj(),
newApple("crispin").toObj(),
},
},
{
Count: 1,
Objects: []types.APIObject{
newApple("fuji").toObj(),
},
},
},
},
{
@@ -1739,6 +1805,245 @@ func TestList(t *testing.T) {
{"green": 2},
},
},
{
name: "pagination with or filters",
apiOps: []*types.APIRequest{
newRequest("filter=metadata.name=el,data.color=el&pagesize=2", "user1"),
newRequest("filter=metadata.name=el,data.color=el&pagesize=2&page=2&revision=42", "user1"),
newRequest("filter=metadata.name=el,data.color=el&pagesize=2&page=3&revision=42", "user1"),
},
access: []map[string]string{
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
},
partitions: map[string][]Partition{
"user1": {
mockPartition{
name: "all",
},
},
},
objects: map[string]*unstructured.UnstructuredList{
"all": {
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"resourceVersion": "42",
},
},
Items: []unstructured.Unstructured{
newApple("fuji").Unstructured,
newApple("granny-smith").Unstructured,
newApple("red-delicious").Unstructured,
newApple("golden-delicious").Unstructured,
newApple("crispin").Unstructured,
},
},
},
want: []types.APIObjectList{
{
Count: 3,
Pages: 2,
Revision: "42",
Objects: []types.APIObject{
newApple("red-delicious").toObj(),
newApple("golden-delicious").toObj(),
},
},
{
Count: 3,
Pages: 2,
Revision: "42",
Objects: []types.APIObject{
newApple("crispin").toObj(),
},
},
{
Count: 3,
Pages: 2,
Revision: "42",
},
},
wantCache: []mockCache{
{
contents: map[cacheKey]*unstructured.UnstructuredList{
{
chunkSize: 100000,
filters: "data.color=el,metadata.name=el",
pageSize: 2,
accessID: getAccessID("user1", "roleA"),
resourcePath: "/apples",
revision: "42",
}: {
Items: []unstructured.Unstructured{
newApple("red-delicious").Unstructured,
newApple("golden-delicious").Unstructured,
newApple("crispin").Unstructured,
},
},
},
},
{
contents: map[cacheKey]*unstructured.UnstructuredList{
{
chunkSize: 100000,
filters: "data.color=el,metadata.name=el",
pageSize: 2,
accessID: getAccessID("user1", "roleA"),
resourcePath: "/apples",
revision: "42",
}: {
Items: []unstructured.Unstructured{
newApple("red-delicious").Unstructured,
newApple("golden-delicious").Unstructured,
newApple("crispin").Unstructured,
},
},
},
},
{
contents: map[cacheKey]*unstructured.UnstructuredList{
{
chunkSize: 100000,
filters: "data.color=el,metadata.name=el",
pageSize: 2,
accessID: getAccessID("user1", "roleA"),
resourcePath: "/apples",
revision: "42",
}: {
Items: []unstructured.Unstructured{
newApple("red-delicious").Unstructured,
newApple("golden-delicious").Unstructured,
newApple("crispin").Unstructured,
},
},
},
},
},
wantListCalls: []map[string]int{
{"all": 1},
{"all": 1},
{"all": 1},
},
},
{
name: "with project filters",
apiOps: []*types.APIRequest{
newRequest("projectsornamespaces=p-abcde", "user1"),
newRequest("projectsornamespaces=p-abcde,p-fghij", "user1"),
newRequest("projectsornamespaces=p-abcde,n2", "user1"),
newRequest("projectsornamespaces!=p-abcde", "user1"),
newRequest("projectsornamespaces!=p-abcde,p-fghij", "user1"),
newRequest("projectsornamespaces!=p-abcde,n2", "user1"),
newRequest("projectsornamespaces=foobar", "user1"),
newRequest("projectsornamespaces!=foobar", "user1"),
},
access: []map[string]string{
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
{
"user1": "roleA",
},
},
partitions: map[string][]Partition{
"user1": {
mockPartition{
name: "all",
},
},
},
objects: map[string]*unstructured.UnstructuredList{
"all": {
Items: []unstructured.Unstructured{
newApple("fuji").withNamespace("n1").Unstructured,
newApple("granny-smith").withNamespace("n1").Unstructured,
newApple("bramley").withNamespace("n2").Unstructured,
newApple("crispin").withNamespace("n3").Unstructured,
},
},
},
want: []types.APIObjectList{
{
Count: 2,
Objects: []types.APIObject{
newApple("fuji").withNamespace("n1").toObj(),
newApple("granny-smith").withNamespace("n1").toObj(),
},
},
{
Count: 3,
Objects: []types.APIObject{
newApple("fuji").withNamespace("n1").toObj(),
newApple("granny-smith").withNamespace("n1").toObj(),
newApple("bramley").withNamespace("n2").toObj(),
},
},
{
Count: 3,
Objects: []types.APIObject{
newApple("fuji").withNamespace("n1").toObj(),
newApple("granny-smith").withNamespace("n1").toObj(),
newApple("bramley").withNamespace("n2").toObj(),
},
},
{
Count: 2,
Objects: []types.APIObject{
newApple("bramley").withNamespace("n2").toObj(),
newApple("crispin").withNamespace("n3").toObj(),
},
},
{
Count: 1,
Objects: []types.APIObject{
newApple("crispin").withNamespace("n3").toObj(),
},
},
{
Count: 1,
Objects: []types.APIObject{
newApple("crispin").withNamespace("n3").toObj(),
},
},
{
Count: 0,
},
{
Count: 4,
Objects: []types.APIObject{
newApple("fuji").withNamespace("n1").toObj(),
newApple("granny-smith").withNamespace("n1").toObj(),
newApple("bramley").withNamespace("n2").toObj(),
newApple("crispin").withNamespace("n3").toObj(),
},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -1752,13 +2057,13 @@ func TestList(t *testing.T) {
}
}
asl := &mockAccessSetLookup{userRoles: test.access}
if test.disableCache {
t.Setenv("CATTLE_REQUEST_CACHE_DISABLED", "Y")
if !test.disableCache {
t.Setenv("CATTLE_REQUEST_CACHE_DISABLED", "false")
}
store := NewStore(mockPartitioner{
stores: stores,
partitions: test.partitions,
}, asl)
}, asl, mockNamespaceCache{})
for i, req := range test.apiOps {
got, gotErr := store.List(req, schema)
assert.Nil(t, gotErr)
@@ -1833,9 +2138,8 @@ func TestListByRevision(t *testing.T) {
},
},
},
}, asl)
}, asl, mockNamespaceCache{})
req := newRequest("", "user1")
t.Setenv("CATTLE_REQUEST_CACHE_DISABLED", "Y")
got, gotErr := store.List(req, schema)
assert.Nil(t, gotErr)
@@ -2026,9 +2330,15 @@ func newApple(name string) apple {
}
func (a apple) toObj() types.APIObject {
meta := a.Object["metadata"].(map[string]interface{})
id := meta["name"].(string)
ns, ok := meta["namespace"]
if ok {
id = ns.(string) + "/" + id
}
return types.APIObject{
Type: "apple",
ID: a.Object["metadata"].(map[string]interface{})["name"].(string),
ID: id,
Object: &a.Unstructured,
}
}
@@ -2040,6 +2350,11 @@ func (a apple) with(data map[string]string) apple {
return a
}
func (a apple) withNamespace(namespace string) apple {
a.Object["metadata"].(map[string]interface{})["namespace"] = namespace
return a
}
type mockAccessSetLookup struct {
accessID string
userRoles []map[string]string
@@ -2062,3 +2377,51 @@ func getAccessID(user, role string) string {
h := sha256.Sum256([]byte(user + role))
return string(h[:])
}
var namespaces = map[string]*corev1.Namespace{
"n1": &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "n1",
Labels: map[string]string{
"field.cattle.io/projectId": "p-abcde",
},
},
},
"n2": &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "n2",
Labels: map[string]string{
"field.cattle.io/projectId": "p-fghij",
},
},
},
"n3": &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "n3",
Labels: map[string]string{
"field.cattle.io/projectId": "p-klmno",
},
},
},
"n4": &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "n4",
},
},
}
type mockNamespaceCache struct{}
func (m mockNamespaceCache) Get(name string) (*corev1.Namespace, error) {
return namespaces[name], nil
}
func (m mockNamespaceCache) List(selector labels.Selector) ([]*corev1.Namespace, error) {
panic("not implemented")
}
func (m mockNamespaceCache) AddIndexer(indexName string, indexer corecontrollers.NamespaceIndexer) {
panic("not implemented")
}
func (m mockNamespaceCache) GetByIndex(indexName, key string) ([]*corev1.Namespace, error) {
panic("not implemented")
}


@@ -19,6 +19,7 @@ import (
metricsStore "github.com/rancher/steve/pkg/stores/metrics"
"github.com/rancher/steve/pkg/stores/partition"
"github.com/rancher/wrangler/pkg/data"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/schemas/validation"
"github.com/rancher/wrangler/pkg/summary"
"github.com/sirupsen/logrus"
@@ -85,7 +86,7 @@ type Store struct {
}
// NewProxyStore returns a wrapped types.Store.
func NewProxyStore(clientGetter ClientGetter, notifier RelationshipNotifier, lookup accesscontrol.AccessSetLookup) types.Store {
func NewProxyStore(clientGetter ClientGetter, notifier RelationshipNotifier, lookup accesscontrol.AccessSetLookup, namespaceCache corecontrollers.NamespaceCache) types.Store {
return &errorStore{
Store: &WatchRefresh{
Store: partition.NewStore(
@@ -96,6 +97,7 @@ func NewProxyStore(clientGetter ClientGetter, notifier RelationshipNotifier, loo
},
},
lookup,
namespaceCache,
),
asl: lookup,
},
@@ -309,7 +311,6 @@ func (s *Store) listAndWatch(apiOp *types.APIRequest, client dynamic.ResourceInt
rowToObject(obj)
result <- watch.Event{Type: watch.Modified, Object: obj}
} else {
logrus.Debugf("notifier watch error: %v", err)
returnErr(errors.Wrapf(err, "notifier watch error: %v", err), result)
}
}
@@ -321,7 +322,6 @@ func (s *Store) listAndWatch(apiOp *types.APIRequest, client dynamic.ResourceInt
for event := range watcher.ResultChan() {
if event.Type == watch.Error {
if status, ok := event.Object.(*metav1.Status); ok {
logrus.Debugf("event watch error: %s", status.Message)
returnErr(fmt.Errorf("event watch error: %s", status.Message), result)
} else {
logrus.Debugf("event watch error: could not decode event object %T", event.Object)
@@ -361,12 +361,22 @@ func (s *Store) WatchNames(apiOp *types.APIRequest, schema *types.APISchema, w t
go func() {
defer close(result)
for item := range c {
if item.Type == watch.Error {
if status, ok := item.Object.(*metav1.Status); ok {
logrus.Debugf("WatchNames received error: %s", status.Message)
} else {
logrus.Debugf("WatchNames received error: %v", item)
}
continue
}
m, err := meta.Accessor(item.Object)
if err != nil {
return
logrus.Debugf("WatchNames cannot process unexpected object: %s", err)
continue
}
if item.Type != watch.Error && names.Has(m.GetName()) {
if names.Has(m.GetName()) {
result <- item
}
}


@@ -0,0 +1,104 @@
package proxy
import (
"fmt"
"net/http"
"testing"
"time"
"github.com/pkg/errors"
"github.com/rancher/apiserver/pkg/types"
"github.com/rancher/steve/pkg/client"
"github.com/rancher/wrangler/pkg/schemas"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
schema2 "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/rest"
clientgotesting "k8s.io/client-go/testing"
)
var c *watch.FakeWatcher
type testFactory struct {
*client.Factory
fakeClient *fake.FakeDynamicClient
}
func TestWatchNamesErrReceive(t *testing.T) {
testClientFactory, err := client.NewFactory(&rest.Config{}, false)
assert.Nil(t, err)
fakeClient := fake.NewSimpleDynamicClient(runtime.NewScheme())
c = watch.NewFakeWithChanSize(5, true)
defer c.Stop()
errMsgsToSend := []string{"err1", "err2", "err3"}
c.Add(&v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "testsecret1"}})
for index := range errMsgsToSend {
c.Error(&metav1.Status{
Message: errMsgsToSend[index],
})
}
c.Add(&v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "testsecret2"}})
fakeClient.PrependWatchReactor("*", func(action clientgotesting.Action) (handled bool, ret watch.Interface, err error) {
return true, c, nil
})
testStore := Store{
clientGetter: &testFactory{Factory: testClientFactory,
fakeClient: fakeClient,
},
}
apiSchema := &types.APISchema{Schema: &schemas.Schema{Attributes: map[string]interface{}{"table": "something"}}}
wc, err := testStore.WatchNames(&types.APIRequest{Namespace: "", Schema: apiSchema, Request: &http.Request{}}, apiSchema, types.WatchRequest{}, sets.NewString("testsecret1", "testsecret2"))
assert.Nil(t, err)
eg := errgroup.Group{}
eg.Go(func() error { return receiveUntil(wc, 5*time.Second) })
err = eg.Wait()
assert.Nil(t, err)
assert.Equal(t, 0, len(c.ResultChan()), "Expected all secrets to have been received")
}
func (t *testFactory) TableAdminClientForWatch(ctx *types.APIRequest, schema *types.APISchema, namespace string, warningHandler rest.WarningHandler) (dynamic.ResourceInterface, error) {
return t.fakeClient.Resource(schema2.GroupVersionResource{}), nil
}
func receiveUntil(wc chan watch.Event, d time.Duration) error {
timer := time.NewTicker(d)
defer timer.Stop()
secretNames := []string{"testsecret1", "testsecret2"}
for {
select {
case event, ok := <-wc:
if !ok {
return errors.New("watch chan should not have been closed")
}
if event.Type == watch.Error {
return fmt.Errorf("watch chan should not have sent events of type [%s]", watch.Error)
}
secret, ok := event.Object.(*v1.Secret)
if !ok {
continue
}
if secret.Name == secretNames[0] {
secretNames = secretNames[1:]
}
if len(secretNames) == 0 {
return nil
}
continue
case <-timer.C:
return errors.New("timed out waiting to receive objects from chan")
}
}
}