Merge pull request #113707 from lucacome/bump-grpc

Bump grpc to v1.51.0
Committed by Kubernetes Prow Robot on 2022-12-10 16:43:42 -08:00 via GitHub (commit 909f5be802)
80 changed files with 941 additions and 472 deletions

go.mod

@ -86,7 +86,7 @@ require (
golang.org/x/tools v0.2.0
google.golang.org/api v0.60.0
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
google.golang.org/protobuf v1.28.1
gopkg.in/gcfg.v1 v1.2.0
gopkg.in/square/go-jose.v2 v2.2.2

go.sum

@ -1212,8 +1212,9 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


@ -20,7 +20,7 @@ require (
go.opentelemetry.io/otel v1.10.0
go.opentelemetry.io/otel/trace v1.10.0
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
google.golang.org/protobuf v1.28.1
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.0.0


@ -816,8 +816,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -38,7 +38,7 @@ require (
golang.org/x/sys v0.3.0
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
google.golang.org/protobuf v1.28.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/square/go-jose.v2 v2.2.2


@ -930,8 +930,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


@ -92,7 +92,7 @@ require (
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect


@ -673,8 +673,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -71,7 +71,7 @@ require (
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect


@ -611,8 +611,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -87,7 +87,7 @@ require (
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect


@ -662,8 +662,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -7,7 +7,7 @@ go 1.19
require (
github.com/gogo/protobuf v1.3.2
github.com/stretchr/testify v1.8.0
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
)
require (


@ -147,8 +147,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -8,7 +8,7 @@ require (
github.com/go-logr/logr v1.2.3
github.com/google/go-cmp v0.5.9
github.com/stretchr/testify v1.8.0
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/client-go v0.0.0


@ -454,8 +454,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -6,7 +6,7 @@ go 1.19
require (
github.com/gogo/protobuf v1.3.2
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
)
require (


@ -133,8 +133,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -94,7 +94,7 @@ require (
golang.org/x/tools v0.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect


@ -672,8 +672,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -7,7 +7,7 @@ go 1.19
require (
github.com/gogo/protobuf v1.3.2
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10
google.golang.org/grpc v1.49.0
google.golang.org/grpc v1.51.0
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/component-base v0.0.0


@ -505,8 +505,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -83,7 +83,7 @@ require (
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/warnings.v0 v0.1.1 // indirect


@ -753,8 +753,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=


@ -90,7 +90,7 @@ require (
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect


@ -666,8 +666,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -88,7 +88,7 @@ require (
golang.org/x/tools v0.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect


@ -667,8 +667,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -19,7 +19,7 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -48,7 +48,7 @@ type BackoffConfig struct {
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -110,6 +110,11 @@ type SubConn interface {
UpdateAddresses([]resolver.Address)
// Connect starts the connecting for this SubConn.
Connect()
// GetOrBuildProducer returns a reference to the existing Producer for this
// ProducerBuilder in this SubConn, or, if one does not currently exist,
// creates a new one and returns it. Returns a close function which must
// be called when the Producer is no longer needed.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
}
// NewSubConnOptions contains options to create new SubConn.
@ -244,7 +249,7 @@ type DoneInfo struct {
// ServerLoad is the load received from server. It's usually sent as part of
// trailing metadata.
//
// The only supported type now is *orca_v1.LoadReport.
// The only supported type now is *orca_v3.LoadReport.
ServerLoad interface{}
}
@ -371,3 +376,21 @@ type ClientConnState struct {
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
// A ProducerBuilder is a simple constructor for a Producer. It is used by the
// SubConn to create producers when needed.
type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as interface{} to avoid a
// dependency cycle. Should also return a close function that will be
// called when all references to the Producer have been given up.
Build(grpcClientConnInterface interface{}) (p Producer, close func())
}
// A Producer is a type shared among potentially many consumers. It is
// associated with a SubConn, and an implementation will typically contain
// other methods to provide additional functionality, e.g. configuration or
// subscription registration.
type Producer interface {
}
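
The ProducerBuilder and Producer types, together with SubConn.GetOrBuildProducer, are added by this bump. Below is a minimal sketch of how a balancer might use them; pingProducer, pingProducerBuilder, and attach are hypothetical names, not gRPC API:

package example

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/balancer"
)

// pingProducer is a hypothetical Producer holding the per-SubConn
// ClientConnInterface used to issue RPCs or streams on that connection.
type pingProducer struct {
    cc grpc.ClientConnInterface
}

// pingProducerBuilder implements balancer.ProducerBuilder.
type pingProducerBuilder struct{}

// Build receives the SubConn's ClientConnInterface (typed as interface{} to
// avoid a dependency cycle) and returns the producer plus a close function
// invoked once all references to the producer have been released.
func (pingProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) {
    p := &pingProducer{cc: cci.(grpc.ClientConnInterface)}
    return p, func() { /* stop any background streams here */ }
}

// attach shows the consumer side: GetOrBuildProducer returns the shared
// producer for this builder on sc plus a cleanup func to call when done.
func attach(sc balancer.SubConn) func() {
    p, cleanup := sc.GetOrBuildProducer(pingProducerBuilder{})
    _ = p.(*pingProducer)
    return cleanup
}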


@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {
// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(b.mergeErrors())


@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct {
// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
//
// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
cse.numIdle += updateVal
}
}
return cse.CurrentState()
}
// CurrentState returns the current aggregate conn state by evaluating the counters
func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
// Evaluate.
if cse.numReady > 0 {
return connectivity.Ready


@ -22,7 +22,7 @@
package roundrobin
import (
"sync"
"sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
// Start at a random index, as the same RR balancer rebuilds a new
// picker when SubConn states change, and we don't want to apply excess
// load to the first server in the list.
next: grpcrand.Intn(len(scs)),
next: uint32(grpcrand.Intn(len(scs))),
}
}
@ -69,15 +69,13 @@ type rrPicker struct {
// created. The slice is immutable. Each Get() will do a round robin
// selection from it and return the selected SubConn.
subConns []balancer.SubConn
mu sync.Mutex
next int
next uint32
}
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
p.mu.Lock()
sc := p.subConns[p.next]
p.next = (p.next + 1) % len(p.subConns)
p.mu.Unlock()
subConnsLen := uint32(len(p.subConns))
nextIndex := atomic.AddUint32(&p.next, 1)
sc := p.subConns[nextIndex%subConnsLen]
return balancer.PickResult{SubConn: sc}, nil
}
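
The picker rewrite above replaces the mutex-guarded index with an atomic counter: each Pick does one atomic add and a modulo over the immutable subConns slice, so concurrent picks no longer contend on a lock, and the random starting offset keeps the first backend from absorbing every rebuild. A standalone sketch of the same pattern (not gRPC API):

package example

import "sync/atomic"

// roundRobin cycles through a fixed, immutable slice using an atomically
// incremented counter instead of a mutex-protected index.
type roundRobin struct {
    items []string
    next  uint32
}

func (r *roundRobin) pick() string {
    n := atomic.AddUint32(&r.next, 1)
    return r.items[n%uint32(len(r.items))]
}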


@ -19,17 +19,20 @@
package grpc
import (
"context"
"fmt"
"strings"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{ac: ac}
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
mu sync.Mutex
ac *addrConn
mu sync.Mutex
ac *addrConn
producers map[balancer.ProducerBuilder]*refCountedProducer
}
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
defer acbw.mu.Unlock()
return acbw.ac
}
var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, returns errSubConnNotReady.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport := acbw.ac.getReadyTransport()
if transport == nil {
return nil, errSubConnNotReady
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
// Invoke performs a unary RPC. If the addrConn is not ready, returns
// errSubConnNotReady.
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil {
return err
}
if err := cs.SendMsg(args); err != nil {
return err
}
return cs.RecvMsg(reply)
}
type refCountedProducer struct {
producer balancer.Producer
refs int // number of current refs to the producer
close func() // underlying producer's close function
}
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
acbw.mu.Lock()
defer acbw.mu.Unlock()
// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
p, close := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: close}
acbw.producers[pb] = pData
}
// Account for this new reference.
pData.refs++
// Return a cleanup function wrapped in a OnceFunc to remove this reference
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
acbw.mu.Lock()
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
acbw.mu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}


@ -261,6 +261,7 @@ type GrpcLogEntry struct {
// according to the type of the log entry.
//
// Types that are assignable to Payload:
//
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
@ -694,12 +695,12 @@ func (x *Message) GetData() []byte {
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because


@ -23,7 +23,7 @@
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.


@ -503,7 +503,7 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
// GetState returns the connectivity.State of ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// the channel is idle. Does not wait for the connection attempts to begin
// before returning.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
// Target returns the target string of the ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -831,9 +831,9 @@ func equalAddresses(a, b []resolver.Address) bool {
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
@ -998,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -1228,38 +1228,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
// address was not successfully connected, or updates ac appropriately with the
// new transport.
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
// TODO: Delete prefaceReceived and move the logic to wait for it into the
// transport.
prefaceReceived := grpcsync.NewEvent()
connClosed := grpcsync.NewEvent()
addr.ServerName = ac.cc.getServerName(addr)
hctx, hcancel := context.WithCancel(ac.ctx)
hcStarted := false // protected by ac.mu
onClose := func() {
onClose := grpcsync.OnceFunc(func() {
ac.mu.Lock()
defer ac.mu.Unlock()
defer connClosed.Fire()
defer hcancel()
if !hcStarted || hctx.Err() != nil {
// We didn't start the health check or set the state to READY, so
// no need to do anything else here.
//
// OR, we have already cancelled the health check context, meaning
// we have already called onClose once for this transport. In this
// case it would be dangerous to clear the transport and update the
// state, since there may be a new transport in this addrConn.
if ac.state == connectivity.Shutdown {
// Already shut down. tearDown() already cleared the transport and
// canceled hctx via ac.ctx, and we expected this connection to be
// closed, so do nothing here.
return
}
hcancel()
if ac.transport == nil {
// We're still connecting to this address, which could error. Do
// not update the connectivity state or resolve; these will happen
// at the end of the tryAllAddrs connection loop in the event of an
// error.
return
}
ac.transport = nil
// Refresh the name resolver
// Refresh the name resolver on any connection loss.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
if ac.state != connectivity.Shutdown {
ac.updateConnectivityState(connectivity.Idle, nil)
}
}
// Always go idle and wait for the LB policy to initiate a new
// connection attempt.
ac.updateConnectivityState(connectivity.Idle, nil)
})
onGoAway := func(r transport.GoAwayReason) {
ac.mu.Lock()
ac.adjustParams(r)
@ -1271,7 +1266,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
defer cancel()
copts.ChannelzParentID = ac.channelzID
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
hcancel()
@ -1279,60 +1274,34 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
return err
}
select {
case <-connectCtx.Done():
// We didn't get the preface in time.
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
// have been set at that point.
//
// We run this in a goroutine because newTr.Close() calls onClose()
// inline, which requires locking ac.mu.
//
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
newTr.Close(transport.ErrConnClosing)
if connectCtx.Err() == context.DeadlineExceeded {
err := errors.New("failed to receive server preface within timeout")
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
return err
}
go newTr.Close(transport.ErrConnClosing)
return nil
case <-prefaceReceived.Done():
// We got the preface - huzzah! things are good.
ac.mu.Lock()
defer ac.mu.Unlock()
if connClosed.HasFired() {
// onClose called first; go idle but do nothing else.
if ac.state != connectivity.Shutdown {
ac.updateConnectivityState(connectivity.Idle, nil)
}
return nil
}
if ac.state == connectivity.Shutdown {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
// been set at that point.
//
// We run this in a goroutine because newTr.Close() calls onClose()
// inline, which requires locking ac.mu.
//
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
go newTr.Close(transport.ErrConnClosing)
return nil
}
ac.curAddr = addr
ac.transport = newTr
hcStarted = true
ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
return nil
case <-connClosed.Done():
// The transport has already closed. If we received the preface, too,
// this is not an error.
select {
case <-prefaceReceived.Done():
return nil
default:
return errors.New("connection closed before server preface received")
}
}
if hctx.Err() != nil {
// onClose was already called for this connection, but the connection
// was successfully established first. Consider it a success and set
// the new state to Idle.
ac.updateConnectivityState(connectivity.Idle, nil)
return nil
}
ac.curAddr = addr
ac.transport = newTr
ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
return nil
}
// startHealthCheck starts the health checking stream (RPC) to watch the health
@ -1583,7 +1552,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
} else {
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.Scheme)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
return rb, nil
@ -1604,9 +1573,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
return nil, err
}
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.Scheme)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
}
cc.parsedTarget = parsedTarget
return rb, nil
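
The resolver lookups above now read the scheme from parsedTarget.URL.Scheme instead of the top-level Scheme field. Custom resolvers can rely on the same url.URL form of the dial target; a minimal sketch, where the "example" scheme and hard-coded address are made up:

package example

import "google.golang.org/grpc/resolver"

// exampleBuilder is a hypothetical resolver.Builder that reads its input
// from Target.URL, the form the ClientConn itself now uses.
type exampleBuilder struct{}

func (exampleBuilder) Scheme() string { return "example" }

func (exampleBuilder) Build(t resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    _ = t.URL.Scheme // "example"
    _ = t.URL.Host   // authority portion of the dial target
    cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
    return nopResolver{}, nil
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}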


@ -36,16 +36,16 @@ import (
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing
// tokens if required. This should be called by the transport layer on
// each request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status
// for the RPC. uri is the URI of the entry point for the request.
// When supported by the underlying implementation, ctx can be used for
// timeout and cancellation. Additionally, RequestInfo data will be
// available via ctx to this call.
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
// it as an arbitrary string.
// GetRequestMetadata gets the current request metadata, refreshing tokens
// if required. This should be called by the transport layer on each
// request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status for
// the RPC (restricted to an allowable set of codes as defined by gRFC
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
// of leaving it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.


@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -29,6 +29,7 @@ import (
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal"
internalbackoff "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@ -36,12 +37,13 @@ import (
)
func init() {
internal.AddExtraDialOptions = func(opt ...DialOption) {
internal.AddGlobalDialOptions = func(opt ...DialOption) {
extraDialOptions = append(extraDialOptions, opt...)
}
internal.ClearExtraDialOptions = func() {
internal.ClearGlobalDialOptions = func() {
extraDialOptions = nil
}
internal.WithBinaryLogger = withBinaryLogger
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@ -61,6 +63,7 @@ type dialOptions struct {
timeout time.Duration
scChan <-chan ServiceConfig
authority string
binaryLogger binarylog.Logger
copts transport.ConnectOptions
callOptions []CallOption
channelzParentID *channelz.Identifier
@ -401,6 +404,14 @@ func WithStatsHandler(h stats.Handler) DialOption {
})
}
// withBinaryLogger returns a DialOption that specifies the binary logger for
// this ClientConn.
func withBinaryLogger(bl binarylog.Logger) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.binaryLogger = bl
})
}
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
// non-temporary dial errors. If f is true, and dialer returns a non-temporary
// error, gRPC will fail the connection to the network address and won't try to


@ -19,7 +19,7 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -28,6 +28,8 @@ package encoding
import (
"io"
"strings"
"google.golang.org/grpc/internal/grpcutil"
)
// Identity specifies the optional encoding for uncompressed streams.
@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor)
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
registeredCompressor[c.Name()] = c
grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
}
// GetCompressor returns Compressor for the given compressor name.


@ -19,7 +19,7 @@
// Package gzip implements and registers the gzip compressor
// during the initialization.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -1,3 +1,20 @@
// Copyright 2015 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The canonical version of this proto can be found at
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0


@ -37,7 +37,7 @@ type Logger interface {
// binLogger is the global binary logger for the binary. One of this should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
// It is used to get a MethodLogger for each individual method.
var binLogger Logger
var grpclogLogger = grpclog.Component("binarylog")
@ -56,11 +56,11 @@ func GetLogger() Logger {
return binLogger
}
// GetMethodLogger returns the methodLogger for the given methodName.
// GetMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func GetMethodLogger(methodName string) MethodLogger {
if binLogger == nil {
@ -117,7 +117,7 @@ func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
// New MethodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
if _, ok := l.config.Services[service]; ok {
return fmt.Errorf("conflicting service rules for service %v found", service)
@ -131,7 +131,7 @@ func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig)
// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
// New MethodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
if _, ok := l.config.Blacklist[method]; ok {
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
@ -161,11 +161,11 @@ func (l *logger) setBlacklist(method string) error {
return nil
}
// getMethodLogger returns the methodLogger for the given methodName.
// getMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
s, m, err := grpcutil.ParseMethod(methodName)
@ -174,16 +174,16 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger {
return nil
}
if ml, ok := l.config.Methods[s+"/"+m]; ok {
return newMethodLogger(ml.Header, ml.Message)
return NewTruncatingMethodLogger(ml.Header, ml.Message)
}
if _, ok := l.config.Blacklist[s+"/"+m]; ok {
return nil
}
if ml, ok := l.config.Services[s]; ok {
return newMethodLogger(ml.Header, ml.Message)
return NewTruncatingMethodLogger(ml.Header, ml.Message)
}
if l.config.All == nil {
return nil
}
return newMethodLogger(l.config.All.Header, l.config.All.Message)
return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
}


@ -30,15 +30,15 @@ import (
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger {
return l
}
// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
// "" is invalid.


@ -52,7 +52,9 @@ type MethodLogger interface {
Log(LogEntryConfig)
}
type methodLogger struct {
// TruncatingMethodLogger is a method logger that truncates headers and messages
// based on configured fields.
type TruncatingMethodLogger struct {
headerMaxLen, messageMaxLen uint64
callID uint64
@ -61,8 +63,9 @@ type methodLogger struct {
sink Sink // TODO(blog): make this plugable.
}
func newMethodLogger(h, m uint64) *methodLogger {
return &methodLogger{
// NewTruncatingMethodLogger returns a new truncating method logger.
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
return &TruncatingMethodLogger{
headerMaxLen: h,
messageMaxLen: m,
@ -75,8 +78,8 @@ func newMethodLogger(h, m uint64) *methodLogger {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other library to reuse as much logic
// in methodLogger as possible.
func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
@ -95,11 +98,11 @@ func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *methodLogger) Log(c LogEntryConfig) {
func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@ -129,7 +132,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
return truncated
}
func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}


@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
func (c *channel) deleteSelfIfReady() {
if !c.deleteSelfFromTree() {
return
@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
func (sc *subChannel) deleteSelfIfReady() {
if !sc.deleteSelfFromTree() {
return


@ -25,11 +25,15 @@ import (
)
const (
prefix = "GRPC_GO_"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
prefix = "GRPC_GO_"
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS"
)
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
// AdvertiseCompressors is set if registered compressor should be advertised
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false")
)


@ -0,0 +1,36 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package envconfig
import "os"
const (
envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
)
var (
// ObservabilityConfig is the json configuration for the gcp/observability
// package specified directly in the envObservabilityConfig env var.
ObservabilityConfig = os.Getenv(envObservabilityConfig)
// ObservabilityConfigFile is the json configuration for the
// gcp/observability specified in a file with the location specified in
// envObservabilityConfigFile env var.
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
)


@ -41,6 +41,7 @@ const (
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
@ -83,9 +84,9 @@ var (
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
// XDSOutlierDetection indicates whether outlier detection support is
// enabled, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
XDSOutlierDetection = false
// enabled, which can be disabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")


@ -110,7 +110,7 @@ type LoggerV2 interface {
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.


@ -52,6 +52,13 @@ func Intn(n int) int {
return r.Intn(n)
}
// Int31n implements rand.Int31n on the grpcrand global source.
func Int31n(n int32) int32 {
mu.Lock()
defer mu.Unlock()
return r.Int31n(n)
}
// Float64 implements rand.Float64 on the grpcrand global source.
func Float64() float64 {
mu.Lock()


@ -0,0 +1,32 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcsync
import (
"sync"
)
// OnceFunc returns a function wrapping f which ensures f is only executed
// once even if the returned function is executed multiple times.
func OnceFunc(f func()) func() {
var once sync.Once
return func() {
once.Do(f)
}
}
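
A short usage sketch of OnceFunc: every call shares the single sync.Once captured in the closure, so the wrapped function runs exactly once even under concurrency. The closeConn name below is invented for the example:

package main

import (
    "fmt"
    "sync"
)

func OnceFunc(f func()) func() {
    var once sync.Once
    return func() { once.Do(f) }
}

func main() {
    closeConn := OnceFunc(func() { fmt.Println("closing") })
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            closeConn()
        }()
    }
    wg.Wait() // "closing" is printed exactly once
}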

View File

@ -0,0 +1,47 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"strings"
"google.golang.org/grpc/internal/envconfig"
)
// RegisteredCompressorNames holds names of the registered compressors.
var RegisteredCompressorNames []string
// IsCompressorNameRegistered returns true when name is available in registry.
func IsCompressorNameRegistered(name string) bool {
for _, compressor := range RegisteredCompressorNames {
if compressor == name {
return true
}
}
return false
}
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
if !envconfig.AdvertiseCompressors {
return ""
}
return strings.Join(RegisteredCompressorNames, ",")
}
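
A standalone sketch of how these helpers combine: names recorded at registration time are joined into the comma-separated string that later feeds the grpc-accept-encoding header, and AdvertiseCompressors can switch that off. The gzip and snappy names below are assumed registrations for the example:

package main

import (
    "fmt"
    "strings"
)

var registeredCompressorNames = []string{"gzip", "snappy"} // assumed registrations

func registeredCompressors(advertise bool) string {
    if !advertise {
        return "" // GRPC_GO_ADVERTISE_COMPRESSORS=false suppresses advertising
    }
    return strings.Join(registeredCompressorNames, ",")
}

func main() {
    fmt.Println(registeredCompressors(true))  // "gzip,snappy"
    fmt.Println(registeredCompressors(false)) // ""
}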

View File

@ -25,7 +25,6 @@ import (
// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")

View File

@ -63,20 +63,30 @@ var (
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports interface{} // func(*grpc.Server, string)
// AddExtraServerOptions adds an array of ServerOption that will be
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
AddExtraServerOptions interface{} // func(opt ...ServerOption)
// ClearExtraServerOptions clears the array of extra ServerOption. This
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking.
ClearExtraServerOptions func()
// AddExtraDialOptions adds an array of DialOption that will be effective
ClearGlobalServerOptions func()
// AddGlobalDialOptions adds an array of DialOption that will be effective
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
AddExtraDialOptions interface{} // func(opt ...DialOption)
// ClearExtraDialOptions clears the array of extra DialOption. This
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
ClearExtraDialOptions func()
ClearGlobalDialOptions func()
// JoinServerOptions combines the server options passed as arguments into a
// single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
// WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a
// server.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
// the provided xds bootstrap config instead of the global configuration from
@ -117,22 +127,6 @@ var (
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// RegisterOutlierDetectionBalancerForTesting registers the Outlier
// Detection Balancer for testing purposes, regardless of the Outlier
// Detection environment variable.
//
// TODO: Remove this function once the Outlier Detection env var is removed.
RegisterOutlierDetectionBalancerForTesting func()
// UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier
// Detection Balancer for testing purposes. This is needed because there is
// no way to unregister the Outlier Detection Balancer after registering it
// solely for testing purposes using
// RegisterOutlierDetectionBalancerForTesting().
//
// TODO: Remove this function once the Outlier Detection env var is removed.
UnregisterOutlierDetectionBalancerForTesting func()
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

View File

@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv
}
addr := resolver.Address{Addr: endpoint}
if b.scheme == unixAbstractScheme {
// prepend "\x00" to address for unix-abstract
addr.Addr = "\x00" + addr.Addr
// We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do
// not want trailing \0 in address.
addr.Addr = "@" + addr.Addr
}
cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
return &nopResolver{}, nil
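
From the caller's side this change is invisible: a unix-abstract target still dials an abstract-namespace socket, only the internal address representation moved from a leading NUL to Go's "@" convention. A minimal dial sketch, with a made-up socket name and assuming insecure credentials:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // "unix-abstract:name" now resolves to the address "@name" internally.
    conn, err := grpc.Dial("unix-abstract:my-abstract-socket",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}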

View File

@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
// config. This method iterates through that list in order, and stops at the
// first policy that is supported.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
var ir intermediateBalancerConfig
err := json.Unmarshal(b, &ir)

View File

@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
}
return proto.Equal(e.s.s, tse.s.s)
}
// IsRestrictedControlPlaneCode returns whether the status includes a code
// restricted for control plane usage as defined by gRFC A54.
func IsRestrictedControlPlaneCode(s *Status) bool {
switch s.Code() {
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
return true
}
return false
}
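
This helper backs the gRFC A54 checks added elsewhere in this diff (picker errors, per-RPC credentials, the config selector): a control-plane error carrying one of the restricted codes is rewritten to codes.Internal before it reaches the application. A self-contained sketch of that guard, where the sanitize wrapper is an illustrative name:

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func isRestricted(c codes.Code) bool {
    switch c {
    case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
        codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
        return true
    }
    return false
}

// sanitize mimics the pattern used in this diff: restricted control-plane
// codes are replaced with Internal, everything else passes through unchanged.
func sanitize(err error) error {
    if st, ok := status.FromError(err); ok && isRestricted(st.Code()) {
        return status.Errorf(codes.Internal, "control plane returned illegal status: %v", err)
    }
    return err
}

func main() {
    fmt.Println(sanitize(status.Error(codes.NotFound, "no such cluster")))
    // rpc error: code = Internal desc = control plane returned illegal status: ...
}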

View File

@ -886,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
// maximum possilbe HTTP2 frame size.
// maximum possible HTTP2 frame size.
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true

View File

@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() {
// mapRecvMsgError returns the non-nil err into the appropriate
// error value as expected by callers of *grpc.parser.recvMsg.
// In particular, in can only be:
// * io.EOF
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// - io.EOF
// - io.ErrUnexpectedEOF
// - of type transport.ConnectionError
// - an error from the status package
func mapRecvMsgError(err error) error {
if err == io.EOF || err == io.ErrUnexpectedEOF {
return err

View File

@ -38,8 +38,10 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
@ -99,16 +101,13 @@ type http2Client struct {
maxSendHeaderListSize *uint32
bdpEst *bdpEstimator
// onPrefaceReceipt is a callback that client transport calls upon
// receiving server preface to signal that a succefull HTTP2
// connection was established.
onPrefaceReceipt func()
maxConcurrentStreams uint32
streamQuota int64
streamsQuotaAvailable chan struct{}
waitingStreams uint32
nextID uint32
registeredCompressors string
// Do not access controlBuf with mu held.
mu sync.Mutex // guard the following variables
@ -194,7 +193,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@ -216,12 +215,35 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
if err != nil {
conn.Close()
}
}(conn)
// The following defer and goroutine monitor the connectCtx for cancelation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
// monitored (and to prevent the connection from ever being closed) after
// returning from this function.
ctxMonitorDone := grpcsync.NewEvent()
newClientCtx, newClientDone := context.WithCancel(connectCtx)
defer func() {
newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
}()
go func(conn net.Conn) {
defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
if connectCtx.Err() != nil {
// connectCtx expired before exiting the function. Hard close the connection.
conn.Close()
}
}(conn)
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
@ -253,15 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}
if transportCreds != nil {
rawConn := conn
// Pull the deadline from the connectCtx, which will be used for
// timeouts in the authentication protocol handshake. Can ignore the
// boolean as the deadline will return the zero value, which will make
// the conn not timeout on I/O operations.
deadline, _ := connectCtx.Deadline()
rawConn.SetDeadline(deadline)
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
rawConn.SetDeadline(time.Time{})
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
@ -299,6 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
ctxDone: ctx.Done(), // Cache Done chan.
cancel: cancel,
userAgent: opts.UserAgent,
registeredCompressors: grpcutil.RegisteredCompressors(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@ -315,17 +330,18 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
kp: kp,
statsHandlers: opts.StatsHandlers,
initialWindowSize: initialWindowSize,
onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
czData: new(channelzData),
onGoAway: onGoAway,
onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
onClose: onClose,
}
// Add peer information to the http2client context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
if md, ok := addr.Metadata.(*metadata.MD); ok {
t.md = *md
@ -361,21 +377,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
t.kpDormancyCond = sync.NewCond(&t.mu)
go t.keepalive()
}
// Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it
// dispatches the frame to the corresponding stream entity.
go t.reader()
// Start the reader goroutine for incoming messages. Each transport has a
// dedicated goroutine which reads HTTP2 frames from the network. Then it
// dispatches the frame to the corresponding stream entity. When the
// server preface is received, readerErrCh is closed. If an error occurs
// first, an error is pushed to the channel. This must be checked before
// returning from this function.
readerErrCh := make(chan error, 1)
go t.reader(readerErrCh)
defer func() {
if err == nil {
err = <-readerErrCh
}
if err != nil {
t.Close(err)
}
}()
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
t.Close(err)
return nil, err
}
if n != len(clientPreface) {
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
t.Close(err)
return nil, err
}
var ss []http2.Setting
@ -395,14 +422,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
err = t.framer.fr.WriteSettings(ss...)
if err != nil {
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
t.Close(err)
return nil, err
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
t.Close(err)
return nil, err
}
}
@ -469,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{
Addr: t.remoteAddr,
AuthInfo: t.authInfo,
AuthInfo: t.authInfo, // Can be nil
}
}
@ -505,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
}
registeredCompressors := t.registeredCompressors
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
// Include the outgoing compressor name when compressor is not registered
// via encoding.RegisterCompressor. This is possible when client uses
// WithCompressor dial option.
if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
if registeredCompressors != "" {
registeredCompressors += ","
}
registeredCompressors += callHdr.SendCompress
}
}
if registeredCompressors != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
}
if dl, ok := ctx.Deadline(); ok {
// Send out timeout regardless its value. The server can detect timeout context by itself.
@ -587,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
if _, ok := status.FromError(err); ok {
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
}
return nil, err
}
@ -616,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, status.Errorf(codes.Internal, "transport: %v", err)
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
}
return nil, err
}
return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
}
callAuthData = make(map[string]string, len(data))
for k, v := range data {
@ -632,13 +681,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
// NewStream errors result in transparent retry, as they mean nothing went onto
// the wire. However, there are two notable exceptions:
//
// 1. If the stream headers violate the max header list size allowed by the
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
// eventually succeed, would need I/O to do so.
// 1. If the stream headers violate the max header list size allowed by the
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
// eventually succeed, would need I/O to do so.
type NewStreamError struct {
Err error
@ -878,19 +927,15 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close(err error) {
t.mu.Lock()
// Make sure we only Close once.
// Make sure we only close once.
if t.state == closing {
t.mu.Unlock()
return
}
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
// Call t.onClose ASAP to prevent the client from attempting to create new
// streams.
t.onClose()
t.state = closing
streams := t.activeStreams
@ -1230,18 +1275,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
if upperLimit == 0 { // This is the first GoAway Frame.
upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
}
t.prevGoAwayID = id
if len(t.activeStreams) == 0 {
t.mu.Unlock()
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
return
}
streamsToClose := make([]*Stream, 0)
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
atomic.StoreUint32(&stream.unprocessed, 1)
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
if streamID > id && streamID <= upperLimit {
atomic.StoreUint32(&stream.unprocessed, 1)
streamsToClose = append(streamsToClose, stream)
}
}
}
t.prevGoAwayID = id
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
// Called outside t.mu because closeStream can take controlBuf's mu, which
// could induce deadlock and is not allowed.
for _, stream := range streamsToClose {
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
}
@ -1469,33 +1525,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}
// reader runs as a separate goroutine in charge of reading data from network
// connection.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
defer close(t.readerDone)
// Check the validity of server preface.
// readServerPreface reads and handles the initial settings frame from the
// server.
func (t *http2Client) readServerPreface() error {
frame, err := t.framer.fr.ReadFrame()
if err != nil {
err = connectionErrorf(true, err, "error reading server preface: %v", err)
t.Close(err) // this kicks off resetTransport, so must be last before return
return
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
return connectionErrorf(true, err, "error reading server preface: %v", err)
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
// this kicks off resetTransport, so must be last before return
t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
}
t.handleSettings(sf, true)
return nil
}
// reader verifies the server preface and reads all subsequent data from
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
defer close(t.readerDone)
if err := t.readServerPreface(); err != nil {
errCh <- err
return
}
t.onPrefaceReceipt()
t.handleSettings(sf, true)
close(errCh)
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
// loop to keep reading incoming messages on this transport.
for {

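The shape of the new preface handshake above, reduced to its core: the reader goroutine owns a one-slot error channel, pushes an error if the server preface is bad, and closes the channel on success; the constructor blocks on that channel before declaring the transport usable. A toy, self-contained version, where startReader and prefaceOK are names invented for the sketch:

package main

import (
    "errors"
    "fmt"
)

func startReader(prefaceOK bool) <-chan error {
    errCh := make(chan error, 1)
    go func() {
        if !prefaceOK {
            errCh <- errors.New("error reading server preface")
            return
        }
        close(errCh) // success: the receive below yields nil
        // ...the real reader keeps looping over incoming frames here...
    }()
    return errCh
}

func main() {
    fmt.Println(<-startReader(true))  // <nil>
    fmt.Println(<-startReader(false)) // error reading server preface
}
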
View File

@ -265,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
// Add peer information to the http2server context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
t.bdpEst = &bdpEstimator{
@ -485,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
pr := &peer.Peer{
Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
if len(mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
@ -1416,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 {
}
}
func (t *http2Server) getPeer() *peer.Peer {
return &peer.Peer{
Addr: t.remoteAddr,
AuthInfo: t.authInfo, // Can be nil
}
}
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0

View File

@ -20,7 +20,6 @@ package transport
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
@ -45,7 +44,7 @@ import (
const (
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
http2MaxFrameLen = 16384 // 16KB frame
// http://http2.github.io/http2-spec/#SettingValues
// https://httpwg.org/specs/rfc7540.html#SettingValues
http2InitHeaderTableSize = 4096
)
@ -251,13 +250,13 @@ func encodeGrpcMessage(msg string) string {
}
func encodeGrpcMessageUnchecked(msg string) string {
var buf bytes.Buffer
var sb strings.Builder
for len(msg) > 0 {
r, size := utf8.DecodeRuneInString(msg)
for _, b := range []byte(string(r)) {
if size > 1 {
// If size > 1, r is not ascii. Always do percent encoding.
buf.WriteString(fmt.Sprintf("%%%02X", b))
fmt.Fprintf(&sb, "%%%02X", b)
continue
}
@ -266,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string {
//
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
if b >= spaceByte && b <= tildeByte && b != percentByte {
buf.WriteByte(b)
sb.WriteByte(b)
} else {
buf.WriteString(fmt.Sprintf("%%%02X", b))
fmt.Fprintf(&sb, "%%%02X", b)
}
}
msg = msg[size:]
}
return buf.String()
return sb.String()
}
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
@ -291,23 +290,23 @@ func decodeGrpcMessage(msg string) string {
}
func decodeGrpcMessageUnchecked(msg string) string {
var buf bytes.Buffer
var sb strings.Builder
lenMsg := len(msg)
for i := 0; i < lenMsg; i++ {
c := msg[i]
if c == percentByte && i+2 < lenMsg {
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
if err != nil {
buf.WriteByte(c)
sb.WriteByte(c)
} else {
buf.WriteByte(byte(parsed))
sb.WriteByte(byte(parsed))
i += 2
}
} else {
buf.WriteByte(c)
sb.WriteByte(c)
}
}
return buf.String()
return sb.String()
}
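
The switch from bytes.Buffer to strings.Builder does not change the encoding itself; percent-encoding of the grpc-message header works as before. A compact, runnable rendition of the encoder logic above, condensed into a single condition as an editorial simplification:

package main

import (
    "fmt"
    "strings"
    "unicode/utf8"
)

func encodeGrpcMessage(msg string) string {
    var sb strings.Builder
    for len(msg) > 0 {
        r, size := utf8.DecodeRuneInString(msg)
        for _, b := range []byte(string(r)) {
            // Keep printable ASCII except '%'; percent-encode everything else,
            // including every byte of a multi-byte rune.
            if size > 1 || b < ' ' || b > '~' || b == '%' {
                fmt.Fprintf(&sb, "%%%02X", b)
            } else {
                sb.WriteByte(b)
            }
        }
        msg = msg[size:]
    }
    return sb.String()
}

func main() {
    fmt.Println(encodeGrpcMessage("résumé done")) // r%C3%A9sum%C3%A9 done
}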
type bufWriter struct {

View File

@ -43,6 +43,10 @@ import (
"google.golang.org/grpc/tap"
)
// ErrNoHeaders is used as a signal that a trailers only response was received,
// and is not a real error.
var ErrNoHeaders = errors.New("stream has no headers")
const logLevel = 2
type bufferPool struct {
@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) {
return s.header.Copy(), nil
}
s.waitOnHeader()
if !s.headerValid {
return nil, s.status.Err()
}
if s.noHeaders {
return nil, ErrNoHeaders
}
return s.header.Copy(), nil
}
@ -573,8 +583,8 @@ type ConnectOptions struct {
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose)
}
// Options provides additional hints and information for message

View File

@ -41,16 +41,17 @@ type MD map[string][]string
// New creates an MD from a given key-value map.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
// result in errors if set in metadata.
func New(m map[string]string) MD {
md := MD{}
md := make(MD, len(m))
for k, val := range m {
key := strings.ToLower(k)
md[key] = append(md[key], val)
@ -62,10 +63,11 @@ func New(m map[string]string) MD {
// Pairs panics if len(kv) is odd.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@ -74,7 +76,7 @@ func Pairs(kv ...string) MD {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := MD{}
md := make(MD, len(kv)/2)
for i := 0; i < len(kv); i += 2 {
key := strings.ToLower(kv[i])
md[key] = append(md[key], kv[i+1])
@ -182,19 +184,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
if !ok {
return nil, false
}
out := MD{}
out := make(MD, len(md))
for k, v := range md {
// We need to manually convert all keys to lower case, because MD is a
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
s := make([]string, len(v))
copy(s, v)
out[key] = s
out[key] = copyOf(v)
}
return out, true
}
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Key must be lower-case.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
return nil
}
if v, ok := md[key]; ok {
return copyOf(v)
}
for k, v := range md {
// We need to manually convert all keys to lower case, because MD is a
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
if strings.ToLower(k) == key {
return copyOf(v)
}
}
return nil
}
// the returned slice must not be modified in place
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}
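
A usage sketch for the new helper, which looks up a single key without copying the whole incoming MD the way FromIncomingContext does. The header names below are arbitrary, and the API is experimental as the notice above says:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/metadata"
)

func main() {
    md := metadata.Pairs("authorization", "Bearer abc", "x-request-id", "42")
    ctx := metadata.NewIncomingContext(context.Background(), md)

    // The key must already be lower-case.
    fmt.Println(metadata.ValueFromIncomingContext(ctx, "x-request-id")) // [42]
    fmt.Println(metadata.ValueFromIncomingContext(ctx, "missing"))      // []
}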
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
@ -222,15 +256,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
return nil, false
}
out := MD{}
mdSize := len(raw.md)
for i := range raw.added {
mdSize += len(raw.added[i]) / 2
}
out := make(MD, mdSize)
for k, v := range raw.md {
// We need to manually convert all keys to lower case, because MD is a
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
s := make([]string, len(v))
copy(s, v)
out[key] = s
out[key] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {

View File

@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/channelz"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
@ -129,8 +130,12 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if err == balancer.ErrNoSubConnAvailable {
continue
}
if _, ok := status.FromError(err); ok {
if st, ok := status.FromError(err); ok {
// Status error: end the RPC unconditionally with this status.
// First restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
}
return nil, nil, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other

View File

@ -25,7 +25,7 @@ import (
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -96,7 +96,7 @@ const (
// Address represents a server the client connects to.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -236,12 +236,12 @@ type ClientConn interface {
//
// Examples:
//
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
// Deprecated: use URL.Scheme instead.
Scheme string

View File

@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption {
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption {
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption {
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption {
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can receive.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can send.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption {
// CompressorCallOption is a CallOption that indicates the compressor to use.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption {
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -548,10 +548,11 @@ type parser struct {
// format. The caller owns the returned msg memory.
//
// If there is an error, possible values are:
// * io.EOF, when no messages remain
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// - io.EOF, when no messages remain
// - io.ErrUnexpectedEOF
// - of type transport.ConnectionError
// - an error from the status package
//
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.

View File

@ -73,12 +73,14 @@ func init() {
internal.DrainServerTransports = func(srv *Server, addr string) {
srv.drainServerTransports(addr)
}
internal.AddExtraServerOptions = func(opt ...ServerOption) {
extraServerOptions = opt
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
extraServerOptions = append(extraServerOptions, opt...)
}
internal.ClearExtraServerOptions = func() {
internal.ClearGlobalServerOptions = func() {
extraServerOptions = nil
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
}
var statusOK = status.New(codes.OK, "")
@ -155,6 +157,7 @@ type serverOptions struct {
streamInt StreamServerInterceptor
chainUnaryInts []UnaryServerInterceptor
chainStreamInts []StreamServerInterceptor
binaryLogger binarylog.Logger
inTapHandle tap.ServerInHandle
statsHandlers []stats.Handler
maxConcurrentStreams uint32
@ -214,6 +217,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
}
}
// joinServerOption provides a way to combine arbitrary number of server
// options into one.
type joinServerOption struct {
opts []ServerOption
}
func (mdo *joinServerOption) apply(do *serverOptions) {
for _, opt := range mdo.opts {
opt.apply(do)
}
}
func newJoinServerOption(opts ...ServerOption) ServerOption {
return &joinServerOption{opts: opts}
}
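
joinServerOption is simply an option that applies a list of options in order. Because grpc.ServerOption exposes only an unexported apply method, outside code reaches this through the internal.JoinServerOptions hook; the sketch below reproduces the idea with a simplified, local options type, and every name in it is invented for the example:

package main

import "fmt"

type serverOptions struct {
    maxStreams uint32
    binlog     string
}

type serverOption interface{ apply(*serverOptions) }

type funcOption func(*serverOptions)

func (f funcOption) apply(o *serverOptions) { f(o) }

// joinOption applies each wrapped option in order, like joinServerOption above.
type joinOption []serverOption

func (j joinOption) apply(o *serverOptions) {
    for _, opt := range j {
        opt.apply(o)
    }
}

func main() {
    joined := joinOption{
        funcOption(func(o *serverOptions) { o.maxStreams = 100 }),
        funcOption(func(o *serverOptions) { o.binlog = "default" }),
    }
    var o serverOptions
    joined.apply(&o)
    fmt.Printf("%+v\n", o) // {maxStreams:100 binlog:default}
}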
// WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
// The default value for this buffer is 32KB.
@ -452,6 +471,14 @@ func StatsHandler(h stats.Handler) ServerOption {
})
}
// binaryLogger returns a ServerOption that can set the binary logger for the
// server.
func binaryLogger(bl binarylog.Logger) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.binaryLogger = bl
})
}
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
@ -1199,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}()
}
binlog := binarylog.GetMethodLogger(stream.Method())
if binlog != nil {
var binlogs []binarylog.MethodLogger
if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
binlogs = append(binlogs, ml)
}
if s.opts.binaryLogger != nil {
if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
binlogs = append(binlogs, ml)
}
}
if len(binlogs) != 0 {
ctx := stream.Context()
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
@ -1221,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if peer, ok := peer.FromContext(ctx); ok {
logEntry.PeerAddr = peer.Addr
}
binlog.Log(logEntry)
for _, binlog := range binlogs {
binlog.Log(logEntry)
}
}
// comp and cp are used for compression. decomp and dc are used for
@ -1261,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
var payInfo *payloadInfo
if len(shs) != 0 || binlog != nil {
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
}
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
@ -1287,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Length: len(d),
})
}
if binlog != nil {
binlog.Log(&binarylog.ClientMessage{
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
Message: d,
})
}
for _, binlog := range binlogs {
binlog.Log(cm)
}
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
@ -1314,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if e := t.WriteStatus(stream, appStatus); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
if binlog != nil {
if len(binlogs) != 0 {
if h, _ := stream.Header(); h.Len() > 0 {
// Only log serverHeader if there was header. Otherwise it can
// be trailer only.
binlog.Log(&binarylog.ServerHeader{
sh := &binarylog.ServerHeader{
Header: h,
})
}
for _, binlog := range binlogs {
binlog.Log(sh)
}
}
binlog.Log(&binarylog.ServerTrailer{
st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
})
}
for _, binlog := range binlogs {
binlog.Log(st)
}
}
return appErr
}
@ -1351,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
}
if binlog != nil {
if len(binlogs) != 0 {
h, _ := stream.Header()
binlog.Log(&binarylog.ServerHeader{
sh := &binarylog.ServerHeader{
Header: h,
})
binlog.Log(&binarylog.ServerTrailer{
}
st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
})
}
for _, binlog := range binlogs {
binlog.Log(sh)
binlog.Log(st)
}
}
return err
}
if binlog != nil {
if len(binlogs) != 0 {
h, _ := stream.Header()
binlog.Log(&binarylog.ServerHeader{
sh := &binarylog.ServerHeader{
Header: h,
})
binlog.Log(&binarylog.ServerMessage{
}
sm := &binarylog.ServerMessage{
Message: reply,
})
}
for _, binlog := range binlogs {
binlog.Log(sh)
binlog.Log(sm)
}
}
if channelz.IsOn() {
t.IncrMsgSent()
@ -1382,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
err = t.WriteStatus(stream, statusOK)
if binlog != nil {
binlog.Log(&binarylog.ServerTrailer{
if len(binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
})
}
for _, binlog := range binlogs {
binlog.Log(st)
}
}
return err
}
@ -1499,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}()
}
ss.binlog = binarylog.GetMethodLogger(stream.Method())
if ss.binlog != nil {
if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
ss.binlogs = append(ss.binlogs, ml)
}
if s.opts.binaryLogger != nil {
if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
ss.binlogs = append(ss.binlogs, ml)
}
}
if len(ss.binlogs) != 0 {
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
Header: md,
@ -1519,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if peer, ok := peer.FromContext(ss.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
ss.binlog.Log(logEntry)
for _, binlog := range ss.binlogs {
binlog.Log(logEntry)
}
}
// If dc is set and matches the stream's compression, use it. Otherwise, try
@ -1585,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.mu.Unlock()
}
t.WriteStatus(ss.s, appStatus)
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ServerTrailer{
if len(ss.binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
})
}
for _, binlog := range ss.binlogs {
binlog.Log(st)
}
}
// TODO: Should we log an error from WriteStatus here and below?
return appErr
@ -1600,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.mu.Unlock()
}
err = t.WriteStatus(ss.s, statusOK)
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ServerTrailer{
if len(ss.binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
})
}
for _, binlog := range ss.binlogs {
binlog.Log(st)
}
}
return err
}
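
Throughout this file the single binlog field becomes a binlogs slice: the method logger from the global binary-logging config and the one from the per-server option are collected together, and every entry is written to each of them. A toy version of that fan-out with a stand-in logger interface; all names are invented for the sketch:

package main

import "fmt"

type methodLogger interface{ Log(entry string) }

type printLogger struct{ name string }

func (p printLogger) Log(entry string) { fmt.Printf("%s logged %s\n", p.name, entry) }

func main() {
    var binlogs []methodLogger
    // Assume both a globally registered logger and a per-server logger exist.
    binlogs = append(binlogs, printLogger{name: "global"})
    binlogs = append(binlogs, printLogger{name: "per-server"})

    for _, binlog := range binlogs {
        binlog.Log("ClientHeader")
    }
}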

View File

@ -19,7 +19,7 @@
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, the appropriate Status is returned.
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, the appropriate Status is returned.
//
// - If err is nil, a Status is returned with codes.OK and no message.
// - If err is nil, a Status is returned with codes.OK and no message.
//
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
// and ok is false.
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
// and ok is false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true

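The reflowed comment above lists three cases; a quick check of each, with a plain stdlib error standing in for "an error not compatible with this package":

package main

import (
    "errors"
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    s, ok := status.FromError(status.Error(codes.NotFound, "missing"))
    fmt.Println(s.Code(), ok) // NotFound true

    s, ok = status.FromError(nil)
    fmt.Println(s.Code(), ok) // OK true

    s, ok = status.FromError(errors.New("plain error"))
    fmt.Println(s.Code(), ok) // Unknown false
}
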
View File

@ -39,6 +39,7 @@ import (
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
if err != nil {
if st, ok := status.FromError(err); ok {
// Restrict the code to the list allowed by gRFC A54.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
}
return nil, err
}
return nil, toRPCErr(err)
}
@ -301,7 +309,14 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
if !cc.dopts.disableRetry {
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
}
cs.binlog = binarylog.GetMethodLogger(method)
if ml := binarylog.GetMethodLogger(method); ml != nil {
cs.binlogs = append(cs.binlogs, ml)
}
if cc.dopts.binaryLogger != nil {
if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
cs.binlogs = append(cs.binlogs, ml)
}
}
// Pick the transport to use and create a new stream on the transport.
// Assign cs.attempt upon success.
@ -322,7 +337,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
return nil, err
}
if cs.binlog != nil {
if len(cs.binlogs) != 0 {
md, _ := metadata.FromOutgoingContext(ctx)
logEntry := &binarylog.ClientHeader{
OnClientSide: true,
@ -336,7 +351,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
logEntry.Timeout = 0
}
}
cs.binlog.Log(logEntry)
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
}
}
if desc != unaryStreamDesc {
@ -480,7 +497,7 @@ type clientStream struct {
retryThrottler *retryThrottler // The throttler active when the RPC began.
binlog binarylog.MethodLogger // Binary logger, can be nil.
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged is a boolean for whether server header has been
// logged. Server header will be logged when the first time one of those
// happens: stream.Header(), stream.Recv().
@ -735,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
noHeader := false
err := cs.withRetry(func(a *csAttempt) error {
var err error
m, err = a.s.Header()
if err == transport.ErrNoHeaders {
noHeader = true
return nil
}
return toRPCErr(err)
}, cs.commitAttemptLocked)
if err != nil {
cs.finish(err)
return nil, err
}
if cs.binlog != nil && !cs.serverHeaderBinlogged {
// Only log if binary log is on and header has not been logged.
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
// Only log if binary log is on and header has not been logged, and
// there is actually headers to log.
logEntry := &binarylog.ServerHeader{
OnClientSide: true,
Header: m,
@ -754,8 +779,10 @@ func (cs *clientStream) Header() (metadata.MD, error) {
if peer, ok := peer.FromContext(cs.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
cs.binlog.Log(logEntry)
cs.serverHeaderBinlogged = true
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
}
}
return m, nil
}
@ -829,38 +856,44 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
return a.sendMsg(m, hdr, payload, data)
}
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if cs.binlog != nil && err == nil {
cs.binlog.Log(&binarylog.ClientMessage{
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
Message: data,
})
}
for _, binlog := range cs.binlogs {
binlog.Log(cm)
}
}
return err
}
func (cs *clientStream) RecvMsg(m interface{}) error {
if cs.binlog != nil && !cs.serverHeaderBinlogged {
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
// Call Header() to binary log header if it's not already logged.
cs.Header()
}
var recvInfo *payloadInfo
if cs.binlog != nil {
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
}, cs.commitAttemptLocked)
if cs.binlog != nil && err == nil {
cs.binlog.Log(&binarylog.ServerMessage{
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
Message: recvInfo.uncompressedBytes,
})
}
for _, binlog := range cs.binlogs {
binlog.Log(sm)
}
}
if err != nil || !cs.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
cs.finish(err)
if cs.binlog != nil {
if len(cs.binlogs) != 0 {
// finish will not log Trailer. Log Trailer here.
logEntry := &binarylog.ServerTrailer{
OnClientSide: true,
@ -873,7 +906,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
if peer, ok := peer.FromContext(cs.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
cs.binlog.Log(logEntry)
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
}
}
}
return err
@ -894,10 +929,13 @@ func (cs *clientStream) CloseSend() error {
return nil
}
cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
if cs.binlog != nil {
cs.binlog.Log(&binarylog.ClientHalfClose{
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
})
}
for _, binlog := range cs.binlogs {
binlog.Log(chc)
}
}
// We never returned an error here for reasons.
return nil
@ -930,10 +968,13 @@ func (cs *clientStream) finish(err error) {
//
// Only one of cancel or trailer needs to be logged. In the cases where
// users don't call RecvMsg, users must have already canceled the RPC.
if cs.binlog != nil && status.Code(err) == codes.Canceled {
cs.binlog.Log(&binarylog.Cancel{
if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
c := &binarylog.Cancel{
OnClientSide: true,
})
}
for _, binlog := range cs.binlogs {
binlog.Log(c)
}
}
if err == nil {
cs.retryThrottler.successfulRPC()
@ -1005,6 +1046,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
}
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
if a.trInfo != nil {
@ -1453,7 +1495,7 @@ type serverStream struct {
statsHandler []stats.Handler
binlog binarylog.MethodLogger
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
// will happen when one of the following two happens: stream.SendHeader(),
// stream.Send().
@ -1487,12 +1529,15 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
}
err = ss.t.WriteHeader(ss.s, md)
if ss.binlog != nil && !ss.serverHeaderBinlogged {
if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
ss.binlog.Log(&binarylog.ServerHeader{
sh := &binarylog.ServerHeader{
Header: h,
})
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
binlog.Log(sh)
}
}
return err
}
@ -1549,17 +1594,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
if ss.binlog != nil {
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
ss.binlog.Log(&binarylog.ServerHeader{
sh := &binarylog.ServerHeader{
Header: h,
})
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
binlog.Log(sh)
}
}
ss.binlog.Log(&binarylog.ServerMessage{
sm := &binarylog.ServerMessage{
Message: data,
})
}
for _, binlog := range ss.binlogs {
binlog.Log(sm)
}
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
@ -1598,13 +1649,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
}()
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || ss.binlog != nil {
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
if err == io.EOF {
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ClientHalfClose{})
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
for _, binlog := range ss.binlogs {
binlog.Log(chc)
}
}
return err
}
@ -1625,10 +1679,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
})
}
}
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ClientMessage{
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
Message: payInfo.uncompressedBytes,
})
}
for _, binlog := range ss.binlogs {
binlog.Log(cm)
}
}
return nil
}

View File

@ -19,7 +19,7 @@
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.49.0"
const Version = "1.51.0"

View File

@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then
fi
# - Ensure all source files contain a copyright message.
not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
# (Done in two parts because Darwin "git grep" has broken support for compound
# exclusion matches.)
(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
not grep 'func Test[^(]' *_test.go
@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"

2
vendor/modules.txt vendored
View File

@ -1136,7 +1136,7 @@ google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.49.0
# google.golang.org/grpc v1.51.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes