1
0
mirror of https://github.com/rancher/steve.git synced 2025-09-16 15:29:04 +00:00

Support quoting values in queries. (#601)

* Support quoting values in queries.

* Correct test: 'informer' => 'sqltypes', and update description.

* Enable backslash-escaping of quoted strings.
This commit is contained in:
Eric Promislow
2025-05-13 11:40:54 -07:00
committed by GitHub
parent 27ed443fff
commit 18ac23afe7
3 changed files with 103 additions and 18 deletions

View File

@@ -259,13 +259,54 @@ func TestParseQuery(t *testing.T) {
}, },
}) })
tests = append(tests, testCase{ tests = append(tests, testCase{
description: "ParseQuery() with filter param set, with value in double quotes should return an error.", description: "ParseQuery() with filter param set, with value in double quotes.",
req: &types.APIRequest{ req: &types.APIRequest{
Request: &http.Request{ Request: &http.Request{
URL: &url.URL{RawQuery: `filter=a1="c1"`}, URL: &url.URL{RawQuery: `filter=a1="c1"`},
}, },
}, },
errExpected: true, expectedLO: sqltypes.ListOptions{
ChunkSize: defaultLimit,
Filters: []sqltypes.OrFilter{
{
Filters: []sqltypes.Filter{
{
Field: []string{"a1"},
Matches: []string{"c1"},
Op: sqltypes.Eq,
},
},
},
},
Pagination: sqltypes.Pagination{
Page: 1,
},
},
})
tests = append(tests, testCase{
description: "ParseQuery() with filter param set, with value in single quotes.",
req: &types.APIRequest{
Request: &http.Request{
URL: &url.URL{RawQuery: "filter=a1b='c1b'"},
},
},
expectedLO: sqltypes.ListOptions{
ChunkSize: defaultLimit,
Filters: []sqltypes.OrFilter{
{
Filters: []sqltypes.Filter{
{
Field: []string{"a1b"},
Matches: []string{"c1b"},
Op: sqltypes.Eq,
},
},
},
},
Pagination: sqltypes.Pagination{
Page: 1,
},
},
}) })
tests = append(tests, testCase{ tests = append(tests, testCase{
description: "ParseQuery() with a labels filter param should create a labels-specific filter.", description: "ParseQuery() with a labels filter param should create a labels-specific filter.",

View File

@@ -22,7 +22,7 @@ https://github.com/kubernetes/apimachinery/blob/90df4d1d2d40ea9b3a522bec6e357723
/** /**
Main changes: Main changes:
1. The upstream `selector.go` file does parsing and applying to the objects being test. 1. The upstream `selector.go` file does parsing and applying to the objects being tested.
We only care about the parser, so the selection part is dropped. We only care about the parser, so the selection part is dropped.
2. I dropped label value validation in the parser 2. I dropped label value validation in the parser
@@ -469,6 +469,33 @@ IdentifierLoop:
return IdentifierToken, s // otherwise is an identifier return IdentifierToken, s // otherwise is an identifier
} }
// scanQuotedString consumes input up to the matching end delimiter and
// returns the contents of the quoted string without the surrounding quotes.
// A backslash escapes the character that follows it (so the delimiter and
// backslash itself can appear inside the string). Reaching end-of-input
// before the closing delimiter produces an ErrorToken whose message shows
// a truncated preview of what was read.
func (l *Lexer) scanQuotedString(delim byte) (tok Token, lit string) {
	var out []byte
	escaped := false
	for {
		ch := l.read()
		if ch == 0 {
			// End of input before the closing delimiter: report an error,
			// previewing at most the first few characters of the string.
			preview := string(out)
			if len(preview) > 12 {
				preview = preview[0:10] + "..."
			}
			return ErrorToken, fmt.Sprintf("unterminated string starting with '%s'", preview)
		}
		if escaped {
			// The previous character was a backslash: take this one literally.
			out = append(out, ch)
			escaped = false
			continue
		}
		if ch == '\\' {
			escaped = true
			continue
		}
		if ch == delim {
			// The closing delimiter is consumed but not part of the literal.
			return QuotedStringToken, string(out)
		}
		out = append(out, ch)
	}
}
// scanSpecialSymbol scans string starting with special symbol. // scanSpecialSymbol scans string starting with special symbol.
// special symbol identify non literal operators. "!=", "==", "=", "!~" // special symbol identify non literal operators. "!=", "==", "=", "!~"
func (l *Lexer) scanSpecialSymbol() (Token, string) { func (l *Lexer) scanSpecialSymbol() (Token, string) {
@@ -521,6 +548,8 @@ func (l *Lexer) Lex() (Token, string) {
case isIdentifierStartChar(ch): case isIdentifierStartChar(ch):
l.unread() l.unread()
return l.scanIDOrKeyword() return l.scanIDOrKeyword()
case ch == '"' || ch == '\'':
return l.scanQuotedString(ch)
default: default:
return ErrorToken, fmt.Sprintf("unexpected character '%c'", ch) return ErrorToken, fmt.Sprintf("unexpected character '%c'", ch)
} }

View File

@@ -54,6 +54,11 @@ func TestSelectorParse(t *testing.T) {
"metadata.labels[im.here]", "metadata.labels[im.here]",
"!metadata.labels[im.not.here]", "!metadata.labels[im.not.here]",
"metadata.labels[k8s.io/meta-stuff] ~ has-dashes_underscores.dots.only", "metadata.labels[k8s.io/meta-stuff] ~ has-dashes_underscores.dots.only",
`metadata.labels[k8s.io/meta-stuff] ~ "m!a@t#c$h%e^v&e*r(y)t-_i=n+g)t{o[$]c}o]m|m\\a:;'<.>"`,
`x="double quotes ok"`,
`x='single quotes ok'`,
`x="double quotes with \\ and \" ok"`,
`x='single quotes with \\ and \' ok'`,
} }
testBadStrings := []string{ testBadStrings := []string{
"!no-label-absence-test", "!no-label-absence-test",
@@ -75,8 +80,6 @@ func TestSelectorParse(t *testing.T) {
"metadata.labels-im.here", "metadata.labels-im.here",
"metadata.labels[missing/close-bracket", "metadata.labels[missing/close-bracket",
"!metadata.labels(im.not.here)", "!metadata.labels(im.not.here)",
`x="no double quotes allowed"`,
`x='no single quotes allowed'`,
} }
for _, test := range testGoodStrings { for _, test := range testGoodStrings {
_, err := Parse(test) _, err := Parse(test)
@@ -110,11 +113,13 @@ func TestLexer(t *testing.T) {
{"!=", NotEqualsToken}, {"!=", NotEqualsToken},
{"(", OpenParToken}, {"(", OpenParToken},
{")", ClosedParToken}, {")", ClosedParToken},
{`'sq string''`, ErrorToken}, {`'sq string'`, QuotedStringToken},
{`"dq string"`, ErrorToken}, {`"dq string"`, QuotedStringToken},
{"~", PartialEqualsToken}, {"~", PartialEqualsToken},
{"!~", NotPartialEqualsToken}, {"!~", NotPartialEqualsToken},
{"||", ErrorToken}, {"||", ErrorToken},
{`"double-quoted string"`, QuotedStringToken},
{`'single-quoted string'`, QuotedStringToken},
} }
for _, v := range testcases { for _, v := range testcases {
l := &Lexer{s: v.s, pos: 0} l := &Lexer{s: v.s, pos: 0}
@@ -122,8 +127,14 @@ func TestLexer(t *testing.T) {
if token != v.t { if token != v.t {
t.Errorf("Got %d it should be %d for '%s'", token, v.t, v.s) t.Errorf("Got %d it should be %d for '%s'", token, v.t, v.s)
} }
if v.t != ErrorToken && lit != v.s { if v.t != ErrorToken {
t.Errorf("Got '%s' it should be '%s'", lit, v.s) exp := v.s
if v.t == QuotedStringToken {
exp = exp[1 : len(exp)-1]
}
if lit != exp {
t.Errorf("Got '%s' it should be '%s'", lit, exp)
}
} }
} }
} }
@@ -153,6 +164,8 @@ func TestLexerSequence(t *testing.T) {
{"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken}}, {"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken}},
{"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken}}, {"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken}},
{"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken}}, {"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken}},
{`key = 'sqs'`, []Token{IdentifierToken, EqualsToken, QuotedStringToken}},
{`key = "dqs"`, []Token{IdentifierToken, EqualsToken, QuotedStringToken}},
{"key=value", []Token{IdentifierToken, EqualsToken, IdentifierToken}}, {"key=value", []Token{IdentifierToken, EqualsToken, IdentifierToken}},
{"key == value", []Token{IdentifierToken, DoubleEqualsToken, IdentifierToken}}, {"key == value", []Token{IdentifierToken, DoubleEqualsToken, IdentifierToken}},
{"key ~ value", []Token{IdentifierToken, PartialEqualsToken, IdentifierToken}}, {"key ~ value", []Token{IdentifierToken, PartialEqualsToken, IdentifierToken}},
@@ -184,6 +197,7 @@ func TestLexerSequence(t *testing.T) {
} }
} }
} }
func TestParserLookahead(t *testing.T) { func TestParserLookahead(t *testing.T) {
testcases := []struct { testcases := []struct {
s string s string
@@ -200,9 +214,9 @@ func TestParserLookahead(t *testing.T) {
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}}, {"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}},
{"key>2", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}}, {"key>2", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}},
{"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}}, {"key<1", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}},
{"key gt 3", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}}, {"key gt 3", []Token{IdentifierToken, IdentifierToken, IdentifierToken, EndOfStringToken}},
{"key lt 4", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}}, {"key lt 4", []Token{IdentifierToken, IdentifierToken, IdentifierToken, EndOfStringToken}},
{`key = multi-word-string`, []Token{IdentifierToken, EqualsToken, QuotedStringToken, EndOfStringToken}}, {`key = "multi-word-string"`, []Token{IdentifierToken, EqualsToken, QuotedStringToken, EndOfStringToken}},
} }
for _, v := range testcases { for _, v := range testcases {
p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0} p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0}
@@ -210,15 +224,16 @@ func TestParserLookahead(t *testing.T) {
if len(p.scannedItems) != len(v.t) { if len(p.scannedItems) != len(v.t) {
t.Errorf("Expected %d items for test %s, found %d", len(v.t), v.s, len(p.scannedItems)) t.Errorf("Expected %d items for test %s, found %d", len(v.t), v.s, len(p.scannedItems))
} }
for { for i, entry := range v.t {
token, lit := p.lookahead(KeyAndOperator) token, _ := p.consume(KeyAndOperator)
token2, lit2 := p.consume(KeyAndOperator)
if token == EndOfStringToken { if token == EndOfStringToken {
if i != len(v.t)-1 {
t.Errorf("Expected end of string token at position %d for test '%s', but length is %d", i, v.s, len(v.t))
}
break break
} }
if token != token2 || lit != lit2 { if token != entry {
t.Errorf("Bad values") t.Errorf("Expected token %v at position %d for test '%s', but got %v", entry, i, v.s, token)
} }
} }
} }