1
0
mirror of https://github.com/rancher/steve.git synced 2025-08-28 19:11:32 +00:00
This commit is contained in:
Eric Promislow 2025-04-24 13:02:31 -04:00 committed by GitHub
commit e207671b4f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 73 additions and 17 deletions

View File

@ -265,7 +265,23 @@ func TestParseQuery(t *testing.T) {
URL: &url.URL{RawQuery: `filter=a1="c1"`}, URL: &url.URL{RawQuery: `filter=a1="c1"`},
}, },
}, },
errExpected: true, expectedLO: informer.ListOptions{
ChunkSize: defaultLimit,
Filters: []informer.OrFilter{
{
Filters: []informer.Filter{
{
Field: []string{"a1"},
Matches: []string{"c1"},
Op: informer.Eq,
},
},
},
},
Pagination: informer.Pagination{
Page: 1,
},
},
}) })
tests = append(tests, testCase{ tests = append(tests, testCase{
description: "ParseQuery() with a labels filter param should create a labels-specific filter.", description: "ParseQuery() with a labels filter param should create a labels-specific filter.",

View File

@ -22,7 +22,7 @@ https://github.com/kubernetes/apimachinery/blob/90df4d1d2d40ea9b3a522bec6e357723
/** /**
Main changes: Main changes:
1. The upstream `selector.go` file does parsing and applying to the objects being test. 1. The upstream `selector.go` file does parsing and applying to the objects being tested.
We only care about the parser, so the selection part is dropped. We only care about the parser, so the selection part is dropped.
2. I dropped label value validation in the parser 2. I dropped label value validation in the parser
@ -469,6 +469,31 @@ IdentifierLoop:
return IdentifierToken, s // otherwise is an identifier return IdentifierToken, s // otherwise is an identifier
} }
// scanQuotedString scans a string literal opened by delim (either ' or "),
// whose opening delimiter has already been consumed by the caller.
// It returns QuotedStringToken and the string contents WITHOUT the
// surrounding delimiters. A backslash escapes the next character, so the
// delimiter itself (and a literal backslash) can appear inside the string.
// Hitting end of input (l.read() == 0) before the closing delimiter yields
// ErrorToken with a message showing a truncated prefix of what was read.
func (l *Lexer) scanQuotedString(delim byte) (tok Token, lit string) {
	var buffer []byte
	inEscape := false
StringLoop:
	for {
		switch ch := l.read(); {
		case ch == 0:
			// Unterminated string: report at most the first 10 chars
			// of the partial contents so the error stays readable.
			s := string(buffer)
			if len(s) > 12 {
				s = s[0:10] + "..."
			}
			return ErrorToken, fmt.Sprintf("unterminated string starting with '%s'", s)
		case inEscape:
			// Previous char was a backslash: take this one verbatim,
			// even if it is the delimiter or another backslash.
			buffer = append(buffer, ch)
			inEscape = false
		case ch == '\\':
			// FIX: the original never set inEscape anywhere, so the
			// `case inEscape:` branch above was unreachable and escape
			// sequences (e.g. \' inside '...') could not work. Arm the
			// escape for the next character; the backslash itself is
			// not emitted.
			inEscape = true
		case ch == delim:
			// Don't include the end-delimiter
			break StringLoop
		default:
			buffer = append(buffer, ch)
		}
	}
	return QuotedStringToken, string(buffer)
}
// scanSpecialSymbol scans string starting with special symbol. // scanSpecialSymbol scans string starting with special symbol.
// special symbol identify non literal operators. "!=", "==", "=", "!~" // special symbol identify non literal operators. "!=", "==", "=", "!~"
func (l *Lexer) scanSpecialSymbol() (Token, string) { func (l *Lexer) scanSpecialSymbol() (Token, string) {
@ -521,6 +546,8 @@ func (l *Lexer) Lex() (Token, string) {
case isIdentifierStartChar(ch): case isIdentifierStartChar(ch):
l.unread() l.unread()
return l.scanIDOrKeyword() return l.scanIDOrKeyword()
case ch == '"' || ch == '\'':
return l.scanQuotedString(ch)
default: default:
return ErrorToken, fmt.Sprintf("unexpected character '%c'", ch) return ErrorToken, fmt.Sprintf("unexpected character '%c'", ch)
} }

View File

@ -54,6 +54,9 @@ func TestSelectorParse(t *testing.T) {
"metadata.labels[im.here]", "metadata.labels[im.here]",
"!metadata.labels[im.not.here]", "!metadata.labels[im.not.here]",
"metadata.labels[k8s.io/meta-stuff] ~ has-dashes_underscores.dots.only", "metadata.labels[k8s.io/meta-stuff] ~ has-dashes_underscores.dots.only",
`metadata.labels[k8s.io/meta-stuff] ~ "m!a@t#c$h%e^v&e*r(y)t-_i=n+g)t{o[$]c}o]m|m\\a:;'<.>"`,
`x="double quotes ok"`,
`x='single quotes ok'`,
} }
testBadStrings := []string{ testBadStrings := []string{
"!no-label-absence-test", "!no-label-absence-test",
@ -75,8 +78,6 @@ func TestSelectorParse(t *testing.T) {
"metadata.labels-im.here", "metadata.labels-im.here",
"metadata.labels[missing/close-bracket", "metadata.labels[missing/close-bracket",
"!metadata.labels(im.not.here)", "!metadata.labels(im.not.here)",
`x="no double quotes allowed"`,
`x='no single quotes allowed'`,
} }
for _, test := range testGoodStrings { for _, test := range testGoodStrings {
_, err := Parse(test) _, err := Parse(test)
@ -110,11 +111,13 @@ func TestLexer(t *testing.T) {
{"!=", NotEqualsToken}, {"!=", NotEqualsToken},
{"(", OpenParToken}, {"(", OpenParToken},
{")", ClosedParToken}, {")", ClosedParToken},
{`'sq string''`, ErrorToken}, {`'sq string'`, QuotedStringToken},
{`"dq string"`, ErrorToken}, {`"dq string"`, QuotedStringToken},
{"~", PartialEqualsToken}, {"~", PartialEqualsToken},
{"!~", NotPartialEqualsToken}, {"!~", NotPartialEqualsToken},
{"||", ErrorToken}, {"||", ErrorToken},
{`"double-quoted string"`, QuotedStringToken},
{`'single-quoted string'`, QuotedStringToken},
} }
for _, v := range testcases { for _, v := range testcases {
l := &Lexer{s: v.s, pos: 0} l := &Lexer{s: v.s, pos: 0}
@ -122,8 +125,14 @@ func TestLexer(t *testing.T) {
if token != v.t { if token != v.t {
t.Errorf("Got %d it should be %d for '%s'", token, v.t, v.s) t.Errorf("Got %d it should be %d for '%s'", token, v.t, v.s)
} }
if v.t != ErrorToken && lit != v.s { if v.t != ErrorToken {
t.Errorf("Got '%s' it should be '%s'", lit, v.s) exp := v.s
if v.t == QuotedStringToken {
exp = exp[1 : len(exp)-1]
}
if lit != exp {
t.Errorf("Got '%s' it should be '%s'", lit, exp)
}
} }
} }
} }
@ -153,6 +162,8 @@ func TestLexerSequence(t *testing.T) {
{"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken}}, {"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken}},
{"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken}}, {"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken}},
{"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken}}, {"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken}},
{`key = 'sqs'`, []Token{IdentifierToken, EqualsToken, QuotedStringToken}},
{`key = "dqs"`, []Token{IdentifierToken, EqualsToken, QuotedStringToken}},
{"key=value", []Token{IdentifierToken, EqualsToken, IdentifierToken}}, {"key=value", []Token{IdentifierToken, EqualsToken, IdentifierToken}},
{"key == value", []Token{IdentifierToken, DoubleEqualsToken, IdentifierToken}}, {"key == value", []Token{IdentifierToken, DoubleEqualsToken, IdentifierToken}},
{"key ~ value", []Token{IdentifierToken, PartialEqualsToken, IdentifierToken}}, {"key ~ value", []Token{IdentifierToken, PartialEqualsToken, IdentifierToken}},
@ -184,6 +195,7 @@ func TestLexerSequence(t *testing.T) {
} }
} }
} }
func TestParserLookahead(t *testing.T) { func TestParserLookahead(t *testing.T) {
testcases := []struct { testcases := []struct {
s string s string
@ -200,9 +212,9 @@ func TestParserLookahead(t *testing.T) {
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}}, {"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}},
{"key>2", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}}, {"key>2", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}},
{"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}}, {"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}},
{"key gt 3", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}}, {"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken, EndOfStringToken}},
{"key lt 4", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}}, {"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken, EndOfStringToken}},
{`key = multi-word-string`, []Token{IdentifierToken, EqualsToken, QuotedStringToken, EndOfStringToken}}, {`key = "multi-word-string"`, []Token{IdentifierToken, EqualsToken, QuotedStringToken, EndOfStringToken}},
} }
for _, v := range testcases { for _, v := range testcases {
p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0} p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0}
@ -210,15 +222,16 @@ func TestParserLookahead(t *testing.T) {
if len(p.scannedItems) != len(v.t) { if len(p.scannedItems) != len(v.t) {
t.Errorf("Expected %d items for test %s, found %d", len(v.t), v.s, len(p.scannedItems)) t.Errorf("Expected %d items for test %s, found %d", len(v.t), v.s, len(p.scannedItems))
} }
for { for i, entry := range v.t {
token, lit := p.lookahead(KeyAndOperator) token, _ := p.consume(KeyAndOperator)
token2, lit2 := p.consume(KeyAndOperator)
if token == EndOfStringToken { if token == EndOfStringToken {
if i != len(v.t)-1 {
t.Errorf("Expected end of string token at position %d for test '%s', but length is %d", i, v.s, len(v.t))
}
break break
} }
if token != token2 || lit != lit2 { if token != entry {
t.Errorf("Bad values") t.Errorf("Expected token %v at position %d for test '%s', but got %v", entry, i, v.s, token)
} }
} }
} }