Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-06-23 14:08:31 +00:00)
runtime: upgrade grpc vendor dependency

- Remove the hard pin to v1.47.0 in go.mod.
- Run `go mod tidy` and `go mod vendor` to actually update to v1.58.3.
- Addresses CVE-2023-44487.

Signed-off-by: Manuel Huber <mahuber@microsoft.com>
This commit is contained in:
parent 644af52968
commit c05b976ebe
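To make the shape of the change concrete before the full diff below, here is a minimal go.mod sketch. The neighboring replace entries are taken from the hunk that follows, but the require line and the comments are illustrative, not a copy of the actual kata-containers file:

```
require (
	google.golang.org/grpc v1.58.3 // was pinned to v1.47.0; v1.58.3 carries the fix for CVE-2023-44487
)

replace (
	github.com/stretchr/testify => github.com/stretchr/testify v1.8.0
	// removed: google.golang.org/grpc => google.golang.org/grpc v1.47.0
	gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
)
```

Dropping the pin and then running `go mod tidy` followed by `go mod vendor` is what regenerates go.sum and the vendor tree, which is what the bulk of this diff shows.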
@ -138,6 +138,5 @@ replace (
|
||||
github.com/stretchr/testify => github.com/stretchr/testify v1.8.0
|
||||
github.com/uber-go/atomic => go.uber.org/atomic v1.5.1
|
||||
golang.org/x/text => golang.org/x/text v0.7.0
|
||||
google.golang.org/grpc => google.golang.org/grpc v1.47.0
|
||||
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
@ -1,3 +1,4 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
@ -51,7 +52,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
@ -73,10 +73,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
|
||||
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.6.0 h1:aWwcz/Ep0Fd7ZuBjQGjU/jdPloM7ydhMW13h85jZNvk=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.6.0/go.mod h1:OQlgtJtDrOxSQ1BWODC8OZK1tzi9W69wek+Jy17ndzo=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
@ -121,7 +119,9 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
@ -131,7 +131,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@ -224,6 +223,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
@ -290,7 +290,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@ -444,7 +443,6 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
@ -534,7 +532,6 @@ go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+Gf
|
||||
go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
|
||||
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
|
||||
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -559,6 +556,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@ -580,6 +578,7 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
|
||||
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
|
||||
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -620,6 +619,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -643,6 +643,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -687,7 +688,6 @@ golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -711,6 +711,7 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
@ -779,6 +780,7 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
@ -786,6 +788,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@ -808,7 +811,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
@ -819,8 +821,21 @@ google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTb
|
||||
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
|
||||
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -850,7 +865,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
src/runtime/vendor/google.golang.org/grpc/CONTRIBUTING.md (generated, vendored): 25 changed lines
@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
|
||||
both author's & review's time is wasted. Create more PRs to address different
|
||||
concerns and everyone will be happy.
|
||||
|
||||
- If you are searching for features to work on, issues labeled [Status: Help
|
||||
Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
|
||||
is a great place to start. These issues are well-documented and usually can be
|
||||
resolved with a single pull request.
|
||||
|
||||
- If you are adding a new file, make sure it has the copyright message template
|
||||
at the top as a comment. You can copy over the message from an existing file
|
||||
and update the year.
|
||||
|
||||
- The grpc package should only depend on standard Go packages and a small number
|
||||
of exceptions. If your contribution introduces new dependencies which are NOT
|
||||
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
|
||||
@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
|
||||
- Provide a good **PR description** as a record of **what** change is being made
|
||||
and **why** it was made. Link to a github issue if it exists.
|
||||
|
||||
- Don't fix code style and formatting unless you are already changing that line
|
||||
to address an issue. PRs with irrelevant changes won't be merged. If you do
|
||||
want to fix formatting or style, do that in a separate PR.
|
||||
- If you want to fix formatting or style, consider whether your changes are an
|
||||
obvious improvement or might be considered a personal preference. If a style
|
||||
change is based on preference, it likely will not be accepted. If it corrects
|
||||
widely agreed-upon anti-patterns, then please do create a PR and explain the
|
||||
benefits of the change.
|
||||
|
||||
- Unless your PR is trivial, you should expect there will be reviewer comments
|
||||
that you'll need to address before merging. We expect you to be reasonably
|
||||
responsive to those comments, otherwise the PR will be closed after 2-3 weeks
|
||||
of inactivity.
|
||||
that you'll need to address before merging. We'll mark it as `Status: Requires
|
||||
Reporter Clarification` if we expect you to respond to these comments in a
|
||||
timely manner. If the PR remains inactive for 6 days, it will be marked as
|
||||
`stale` and automatically close 7 days after that if we don't hear back from
|
||||
you.
|
||||
|
||||
- Maintain **clean commit history** and use **meaningful commit messages**. PRs
|
||||
with messy commit history are difficult to review and won't be merged. Use
|
||||
|
src/runtime/vendor/google.golang.org/grpc/README.md (generated, vendored): 58 changed lines
@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
|
||||
|
||||
## Installation
|
||||
|
||||
With [Go module][] support (Go 1.11+), simply add the following import
|
||||
Simply add the following import to your code, and then `go [build|run|test]`
|
||||
will automatically fetch the necessary dependencies:
|
||||
|
||||
|
||||
```go
|
||||
import "google.golang.org/grpc"
|
||||
```
|
||||
|
||||
to your code, and then `go [build|run|test]` will automatically fetch the
|
||||
necessary dependencies.
|
||||
|
||||
Otherwise, to install the `grpc-go` package, run the following command:
|
||||
|
||||
```console
|
||||
$ go get -u google.golang.org/grpc
|
||||
```
|
||||
|
||||
> **Note:** If you are trying to access `grpc-go` from **China**, see the
|
||||
> [FAQ](#FAQ) below.
|
||||
|
||||
@ -56,15 +49,6 @@ To build Go code, there are several options:
|
||||
|
||||
- Set up a VPN and access google.golang.org through that.
|
||||
|
||||
- Without Go module support: `git clone` the repo manually:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
|
||||
```
|
||||
|
||||
You will need to do the same for all of grpc's dependencies in `golang.org`,
|
||||
e.g. `golang.org/x/net`.
|
||||
|
||||
- With Go module support: it is possible to use the `replace` feature of `go
|
||||
mod` to create aliases for golang.org packages. In your project's directory:
|
||||
|
||||
@ -76,33 +60,13 @@ To build Go code, there are several options:
|
||||
```
|
||||
|
||||
Again, this will need to be done for all transitive dependencies hosted on
|
||||
golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
|
||||
golang.org as well. For details, refer to [golang/go issue
|
||||
#28652](https://github.com/golang/go/issues/28652).
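The commands of the example above are elided from this diff context, so the following is only an illustrative sketch (an assumption about typical usage, not text from the README) of creating such aliases with `go mod edit` against the GitHub mirrors of the golang.org repositories:

```sh
# Alias golang.org/x/... packages to their GitHub mirrors (illustrative only).
go mod edit -replace=golang.org/x/net=github.com/golang/net@latest
go mod edit -replace=golang.org/x/text=github.com/golang/text@latest
go mod tidy
```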
|
||||
|
||||
### Compiling error, undefined: grpc.SupportPackageIsVersion
|
||||
|
||||
#### If you are using Go modules:
|
||||
|
||||
Ensure your gRPC-Go version is `require`d at the appropriate version in
|
||||
the same module containing the generated `.pb.go` files. For example,
|
||||
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
|
||||
|
||||
```go
|
||||
module <your module name>
|
||||
|
||||
require (
|
||||
google.golang.org/grpc v1.27.0
|
||||
)
|
||||
```
|
||||
|
||||
#### If you are *not* using Go modules:
|
||||
|
||||
Update the `proto` package, gRPC package, and rebuild the `.proto` files:
|
||||
|
||||
```sh
|
||||
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
|
||||
go get -u google.golang.org/grpc
|
||||
protoc --go_out=plugins=grpc:. *.proto
|
||||
```
|
||||
Please update to the latest version of gRPC-Go using
|
||||
`go get google.golang.org/grpc`.
|
||||
|
||||
### How to turn on logging
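The body of this section lies outside the diff context. As a reminder of typical grpc-go usage (an assumption, not text from this commit), logging is controlled through environment variables read by the grpclog package:

```console
$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
$ ./your-binary   # hypothetical binary name
```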
|
||||
|
||||
@ -121,9 +85,11 @@ possible reasons, including:
|
||||
1. mis-configured transport credentials, connection failed on handshaking
|
||||
1. bytes disrupted, possibly by a proxy in between
|
||||
1. server shutdown
|
||||
1. Keepalive parameters caused connection shutdown, for example if you have configured
|
||||
your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
|
||||
If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
|
||||
1. Keepalive parameters caused connection shutdown, for example if you have
|
||||
configured your server to terminate connections regularly to [trigger DNS
|
||||
lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
|
||||
If this is the case, you may want to increase your
|
||||
[MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
|
||||
to allow longer RPC calls to finish.
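A rough sketch of the server-side keepalive settings referenced in the item above; the values and the bare server setup are placeholders, not taken from this repository:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Recycle connections periodically (for example to trigger DNS re-resolution),
	// but give in-flight RPCs a grace period before the connection is hard-closed.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
	_ = srv // register services and call srv.Serve(listener) in real code
}
```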
|
||||
|
||||
It can be tricky to debug this because the error happens on the client side but
|
||||
|
src/runtime/vendor/google.golang.org/grpc/attributes/attributes.go (generated, vendored): 74 changed lines
@ -19,36 +19,41 @@
|
||||
// Package attributes defines a generic key/value store used in various gRPC
|
||||
// components.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
package attributes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Attributes is an immutable struct for storing and retrieving generic
|
||||
// key/value pairs. Keys must be hashable, and users should define their own
|
||||
// types for keys. Values should not be modified after they are added to an
|
||||
// Attributes or if they were received from one. If values implement 'Equal(o
|
||||
// interface{}) bool', it will be called by (*Attributes).Equal to determine
|
||||
// whether two values with the same key should be considered equal.
|
||||
// any) bool', it will be called by (*Attributes).Equal to determine whether
|
||||
// two values with the same key should be considered equal.
|
||||
type Attributes struct {
|
||||
m map[interface{}]interface{}
|
||||
m map[any]any
|
||||
}
|
||||
|
||||
// New returns a new Attributes containing the key/value pair.
|
||||
func New(key, value interface{}) *Attributes {
|
||||
return &Attributes{m: map[interface{}]interface{}{key: value}}
|
||||
func New(key, value any) *Attributes {
|
||||
return &Attributes{m: map[any]any{key: value}}
|
||||
}
|
||||
|
||||
// WithValue returns a new Attributes containing the previous keys and values
|
||||
// and the new key/value pair. If the same key appears multiple times, the
|
||||
// last value overwrites all previous values for that key. To remove an
|
||||
// existing key, use a nil value. value should not be modified later.
|
||||
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
|
||||
func (a *Attributes) WithValue(key, value any) *Attributes {
|
||||
if a == nil {
|
||||
return New(key, value)
|
||||
}
|
||||
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
|
||||
n := &Attributes{m: make(map[any]any, len(a.m)+1)}
|
||||
for k, v := range a.m {
|
||||
n.m[k] = v
|
||||
}
|
||||
@ -58,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes {
|
||||
|
||||
// Value returns the value associated with these attributes for key, or nil if
|
||||
// no value is associated with key. The returned value should not be modified.
|
||||
func (a *Attributes) Value(key interface{}) interface{} {
|
||||
func (a *Attributes) Value(key any) any {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
return a.m[key]
|
||||
}
|
||||
|
||||
// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
|
||||
// bool' is implemented for a value in the attributes, it is called to
|
||||
// determine if the value matches the one stored in the other attributes. If
|
||||
// Equal is not implemented, standard equality is used to determine if the two
|
||||
// values are equal. Note that some types (e.g. maps) aren't comparable by
|
||||
// default, so they must be wrapped in a struct, or in an alias type, with Equal
|
||||
// defined.
|
||||
// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
|
||||
// implemented for a value in the attributes, it is called to determine if the
|
||||
// value matches the one stored in the other attributes. If Equal is not
|
||||
// implemented, standard equality is used to determine if the two values are
|
||||
// equal. Note that some types (e.g. maps) aren't comparable by default, so
|
||||
// they must be wrapped in a struct, or in an alias type, with Equal defined.
|
||||
func (a *Attributes) Equal(o *Attributes) bool {
|
||||
if a == nil && o == nil {
|
||||
return true
|
||||
@ -88,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool {
|
||||
// o missing element of a
|
||||
return false
|
||||
}
|
||||
if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
|
||||
if eq, ok := v.(interface{ Equal(o any) bool }); ok {
|
||||
if !eq.Equal(ov) {
|
||||
return false
|
||||
}
|
||||
@ -99,3 +103,39 @@ func (a *Attributes) Equal(o *Attributes) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// String prints the attribute map. If any key or values throughout the map
|
||||
// implement fmt.Stringer, it calls that method and appends.
|
||||
func (a *Attributes) String() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("{")
|
||||
first := true
|
||||
for k, v := range a.m {
|
||||
if !first {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
|
||||
first = false
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func str(x any) string {
|
||||
if v, ok := x.(fmt.Stringer); ok {
|
||||
return v.String()
|
||||
} else if v, ok := x.(string); ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("<%p>", x)
|
||||
}
|
||||
|
||||
// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
|
||||
// the Attributes correctly when printing (via pretty.JSON) structs containing
|
||||
// Attributes as fields.
|
||||
//
|
||||
// Is it impossible to unmarshal attributes from a JSON representation and this
|
||||
// method is meant only for debugging purposes.
|
||||
func (a *Attributes) MarshalJSON() ([]byte, error) {
|
||||
return []byte(a.String()), nil
|
||||
}
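To illustrate the updated attributes API shown above (the move from interface{} to any), here is a small usage sketch; the key type and the values are made up for the example:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be user-defined types so that different packages cannot collide.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-west-1")
	// WithValue returns a new Attributes value; the original stays immutable.
	a = a.WithValue(regionKey{}, "eu-central-1")
	fmt.Println(a.Value(regionKey{})) // eu-central-1
}
```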
|
||||
|
src/runtime/vendor/google.golang.org/grpc/backoff.go (generated, vendored): 2 changed lines
@ -48,7 +48,7 @@ type BackoffConfig struct {
|
||||
// here for more details:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
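The deprecated BackoffConfig above has a current counterpart in the google.golang.org/grpc/backoff package. As a sketch (the target address and the values are placeholders, and this code is not part of the commit), connection backoff is typically wired up like this:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   2 * time.Minute,
			},
			MinConnectTimeout: 20 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```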
|
||||
|
src/runtime/vendor/google.golang.org/grpc/balancer/balancer.go (generated, vendored): 115 changed lines
@ -105,11 +105,23 @@ type SubConn interface {
|
||||
//
|
||||
// This will trigger a state transition for the SubConn.
|
||||
//
|
||||
// Deprecated: This method is now part of the ClientConn interface and will
|
||||
// eventually be removed from here.
|
||||
// Deprecated: this method will be removed. Create new SubConns for new
|
||||
// addresses instead.
|
||||
UpdateAddresses([]resolver.Address)
|
||||
// Connect starts the connecting for this SubConn.
|
||||
Connect()
|
||||
// GetOrBuildProducer returns a reference to the existing Producer for this
|
||||
// ProducerBuilder in this SubConn, or, if one does not currently exist,
|
||||
// creates a new one and returns it. Returns a close function which must
|
||||
// be called when the Producer is no longer needed.
|
||||
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
|
||||
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
|
||||
// allowed to complete. No future calls should be made on the SubConn.
|
||||
// One final state update will be delivered to the StateListener (or
|
||||
// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
|
||||
// indicate the shutdown operation. This may be delivered before
|
||||
// in-progress RPCs are complete and the actual connection is closed.
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// NewSubConnOptions contains options to create new SubConn.
|
||||
@ -124,6 +136,11 @@ type NewSubConnOptions struct {
|
||||
// HealthCheckEnabled indicates whether health check service should be
|
||||
// enabled on this SubConn
|
||||
HealthCheckEnabled bool
|
||||
// StateListener is called when the state of the subconn changes. If nil,
|
||||
// Balancer.UpdateSubConnState will be called instead. Will never be
|
||||
// invoked until after Connect() is called on the SubConn created with
|
||||
// these options.
|
||||
StateListener func(SubConnState)
|
||||
}
|
||||
|
||||
// State contains the balancer's state relevant to the gRPC ClientConn.
|
||||
@ -145,16 +162,24 @@ type ClientConn interface {
|
||||
// NewSubConn is called by balancer to create a new SubConn.
|
||||
// It doesn't block and wait for the connections to be established.
|
||||
// Behaviors of the SubConn can be controlled by options.
|
||||
//
|
||||
// Deprecated: please be aware that in a future version, SubConns will only
|
||||
// support one address per SubConn.
|
||||
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
|
||||
// RemoveSubConn removes the SubConn from ClientConn.
|
||||
// The SubConn will be shutdown.
|
||||
//
|
||||
// Deprecated: use SubConn.Shutdown instead.
|
||||
RemoveSubConn(SubConn)
|
||||
// UpdateAddresses updates the addresses used in the passed in SubConn.
|
||||
// gRPC checks if the currently connected address is still in the new list.
|
||||
// If so, the connection will be kept. Else, the connection will be
|
||||
// gracefully closed, and a new connection will be created.
|
||||
//
|
||||
// This will trigger a state transition for the SubConn.
|
||||
// This may trigger a state transition for the SubConn.
|
||||
//
|
||||
// Deprecated: this method will be removed. Create new SubConns for new
|
||||
// addresses instead.
|
||||
UpdateAddresses(SubConn, []resolver.Address)
|
||||
|
||||
// UpdateState notifies gRPC that the balancer's internal state has
|
||||
@ -244,8 +269,8 @@ type DoneInfo struct {
|
||||
// ServerLoad is the load received from server. It's usually sent as part of
|
||||
// trailing metadata.
|
||||
//
|
||||
// The only supported type now is *orca_v1.LoadReport.
|
||||
ServerLoad interface{}
|
||||
// The only supported type now is *orca_v3.LoadReport.
|
||||
ServerLoad any
|
||||
}
|
||||
|
||||
var (
|
||||
@ -274,6 +299,14 @@ type PickResult struct {
|
||||
// type, Done may not be called. May be nil if the balancer does not wish
|
||||
// to be notified when the RPC completes.
|
||||
Done func(DoneInfo)
|
||||
|
||||
// Metadata provides a way for LB policies to inject arbitrary per-call
|
||||
// metadata. Any metadata returned here will be merged with existing
|
||||
// metadata added by the client application.
|
||||
//
|
||||
// LB policies with child policies are responsible for propagating metadata
|
||||
// injected by their children to the ClientConn, as part of Pick().
|
||||
Metadata metadata.MD
|
||||
}
|
||||
|
||||
// TransientFailureError returns e. It exists for backward compatibility and
|
||||
@ -330,9 +363,13 @@ type Balancer interface {
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
//
|
||||
// Deprecated: Use NewSubConnOptions.StateListener when creating the
|
||||
// SubConn instead.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
// Close closes the balancer. The balancer is not required to call
|
||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||
// Close closes the balancer. The balancer is not currently required to
|
||||
// call SubConn.Shutdown for its existing SubConns; however, this will be
|
||||
// required in a future release, so it is recommended.
|
||||
Close()
|
||||
}
|
||||
|
||||
@ -372,55 +409,19 @@ type ClientConnState struct {
|
||||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transient failure state.
|
||||
numIdle uint64 // Number of addrConns in idle state.
|
||||
// A ProducerBuilder is a simple constructor for a Producer. It is used by the
|
||||
// SubConn to create producers when needed.
|
||||
type ProducerBuilder interface {
|
||||
// Build creates a Producer. The first parameter is always a
|
||||
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
|
||||
// associated SubConn), but is declared as `any` to avoid a dependency
|
||||
// cycle. Should also return a close function that will be called when all
|
||||
// references to the Producer have been given up.
|
||||
Build(grpcClientConnInterface any) (p Producer, close func())
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
// it evaluates what aggregated state should be.
|
||||
//
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||||
// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure;
|
||||
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
|
||||
// - Else there are no subconns and the aggregated state is Transient Failure
|
||||
//
|
||||
// Shutdown is not considered.
|
||||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
case connectivity.Idle:
|
||||
cse.numIdle += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
if cse.numTransientFailure > 0 {
|
||||
return connectivity.TransientFailure
|
||||
}
|
||||
if cse.numIdle > 0 {
|
||||
return connectivity.Idle
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
|
||||
// A Producer is a type shared among potentially many consumers. It is
|
||||
// associated with a SubConn, and an implementation will typically contain
|
||||
// other methods to provide additional functionality, e.g. configuration or
|
||||
// subscription registration.
|
||||
type Producer any
|
||||
|
src/runtime/vendor/google.golang.org/grpc/balancer/base/balancer.go (generated, vendored): 30 changed lines
@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
config: bb.config,
|
||||
state: connectivity.Connecting,
|
||||
}
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
@ -104,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
addrsSet.Set(a, nil)
|
||||
if _, ok := b.subConns.Get(a); !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
|
||||
var sc balancer.SubConn
|
||||
opts := balancer.NewSubConnOptions{
|
||||
HealthCheckEnabled: b.config.HealthCheck,
|
||||
StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
|
||||
}
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
|
||||
if err != nil {
|
||||
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
@ -120,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
sc := sci.(balancer.SubConn)
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet.Get(a); !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
sc.Shutdown()
|
||||
b.subConns.Delete(a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in UpdateSubConnState.
|
||||
// The entry will be deleted in updateSubConnState.
|
||||
}
|
||||
}
|
||||
// If resolver state contains no addresses, return an error so ClientConn
|
||||
@ -134,6 +140,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
b.regeneratePicker()
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -153,8 +162,8 @@ func (b *baseBalancer) mergeErrors() error {
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(b.mergeErrors())
|
||||
@ -173,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() {
|
||||
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
|
||||
// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
|
||||
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
|
||||
}
|
||||
|
||||
func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
s := state.ConnectivityState
|
||||
if logger.V(2) {
|
||||
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
@ -200,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
case connectivity.Idle:
|
||||
sc.Connect()
|
||||
case connectivity.Shutdown:
|
||||
// When an address was removed by resolver, b called RemoveSubConn but
|
||||
// kept the sc's state in scStates. Remove state for this sc here.
|
||||
// When an address was removed by resolver, b called Shutdown but kept
|
||||
// the sc's state in scStates. Remove state for this sc here.
|
||||
delete(b.scStates, sc)
|
||||
case connectivity.TransientFailure:
|
||||
// Save error to be reported via picker.
|
||||
@ -222,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
// and it doesn't need to call RemoveSubConn for the SubConns.
|
||||
// and it doesn't need to call Shutdown for the SubConns.
|
||||
func (b *baseBalancer) Close() {
|
||||
}
|
||||
|
||||
|
src/runtime/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go (generated, vendored, new file): 74 changed lines
@ -0,0 +1,74 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package balancer
|
||||
|
||||
import "google.golang.org/grpc/connectivity"
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transient failure state.
|
||||
numIdle uint64 // Number of addrConns in idle state.
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
// it evaluates what aggregated state should be.
|
||||
//
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||||
// - Else if at least one SubConn is Idle, the aggregated state is Idle;
|
||||
// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
|
||||
//
|
||||
// Shutdown is not considered.
|
||||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
case connectivity.Idle:
|
||||
cse.numIdle += updateVal
|
||||
}
|
||||
}
|
||||
return cse.CurrentState()
|
||||
}
|
||||
|
||||
// CurrentState returns the current aggregate conn state by evaluating the counters
|
||||
func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
if cse.numIdle > 0 {
|
||||
return connectivity.Idle
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
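A small sketch of how the evaluator added above behaves. Because Shutdown is not counted, it can act as a "no previous state" placeholder for the old-state argument when a brand-new SubConn is registered (the sequence of transitions here is invented for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns appear; Shutdown is ignored by the counters.
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)) // CONNECTING
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Ready))      // READY

	// The ready SubConn falls back to idle; Connecting now wins per the rules above.
	fmt.Println(cse.RecordTransition(connectivity.Ready, connectivity.Idle)) // CONNECTING
}
```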
|
src/runtime/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go (generated, vendored): 16 changed lines
@ -22,7 +22,7 @@
|
||||
package roundrobin
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
|
||||
// Start at a random index, as the same RR balancer rebuilds a new
|
||||
// picker when SubConn states change, and we don't want to apply excess
|
||||
// load to the first server in the list.
|
||||
next: grpcrand.Intn(len(scs)),
|
||||
next: uint32(grpcrand.Intn(len(scs))),
|
||||
}
|
||||
}
|
||||
|
||||
@ -69,15 +69,13 @@ type rrPicker struct {
|
||||
// created. The slice is immutable. Each Get() will do a round robin
|
||||
// selection from it and return the selected SubConn.
|
||||
subConns []balancer.SubConn
|
||||
|
||||
mu sync.Mutex
|
||||
next int
|
||||
next uint32
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
p.mu.Lock()
|
||||
sc := p.subConns[p.next]
|
||||
p.next = (p.next + 1) % len(p.subConns)
|
||||
p.mu.Unlock()
|
||||
subConnsLen := uint32(len(p.subConns))
|
||||
nextIndex := atomic.AddUint32(&p.next, 1)
|
||||
|
||||
sc := p.subConns[nextIndex%subConnsLen]
|
||||
return balancer.PickResult{SubConn: sc}, nil
|
||||
}
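The picker change above replaces a mutex-guarded index with an atomic counter reduced modulo the slice length. A standalone sketch of that pattern (not gRPC code; names are made up):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// roundRobin picks items in order without a mutex: every call bumps a shared
// atomic counter and indexes the immutable slice modulo its length.
type roundRobin struct {
	next  uint32
	items []string
}

func (r *roundRobin) pick() string {
	n := atomic.AddUint32(&r.next, 1)
	return r.items[n%uint32(len(r.items))]
}

func main() {
	r := &roundRobin{items: []string{"a", "b", "c"}}
	for i := 0; i < 5; i++ {
		fmt.Print(r.pick(), " ") // b c a b c
	}
	fmt.Println()
}
```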
|
||||
|
src/runtime/vendor/google.golang.org/grpc/balancer_conn_wrappers.go (generated, vendored): 536 changed lines
@ -19,6 +19,7 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -26,12 +27,20 @@ import (
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
type ccbMode int
|
||||
|
||||
const (
|
||||
ccbModeActive = iota
|
||||
ccbModeIdle
|
||||
ccbModeClosed
|
||||
ccbModeExitingIdle
|
||||
)
|
||||
|
||||
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||||
//
|
||||
// ccBalancerWrapper implements methods corresponding to the ones on the
|
||||
@ -46,192 +55,89 @@ import (
|
||||
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
||||
// switches happen in a graceful manner.
|
||||
type ccBalancerWrapper struct {
|
||||
cc *ClientConn
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc *ClientConn
|
||||
opts balancer.BuildOptions
|
||||
|
||||
// Since these fields are accessed only from handleXxx() methods which are
|
||||
// synchronized by the watcher goroutine, we do not need a mutex to protect
|
||||
// these fields.
|
||||
// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled in the serializer. Fields
|
||||
// accessed *only* in these serializer callbacks, can therefore be accessed
|
||||
// without a mutex.
|
||||
balancer *gracefulswitch.Balancer
|
||||
curBalancerName string
|
||||
|
||||
updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
|
||||
resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
|
||||
closed *grpcsync.Event // Indicates if close has been called.
|
||||
done *grpcsync.Event // Indicates if close has completed its work.
|
||||
// mu guards access to the below fields. Access to the serializer and its
|
||||
// cancel function needs to be mutex protected because they are overwritten
|
||||
// when the wrapper exits idle mode.
|
||||
mu sync.Mutex
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
|
||||
serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
|
||||
mode ccbMode // Tracks the current mode of the wrapper.
|
||||
}
|
||||
|
||||
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
|
||||
// is not created until the switchTo() method is invoked.
|
||||
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
updateCh: buffer.NewUnbounded(),
|
||||
resultCh: buffer.NewUnbounded(),
|
||||
closed: grpcsync.NewEvent(),
|
||||
done: grpcsync.NewEvent(),
|
||||
cc: cc,
|
||||
opts: bopts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
|
||||
return ccb
|
||||
}
|
||||
|
||||
// The following xxxUpdate structs wrap the arguments received as part of the
|
||||
// corresponding update. The watcher goroutine uses the 'type' of the update to
|
||||
// invoke the appropriate handler routine to handle the update.
|
||||
|
||||
type ccStateUpdate struct {
|
||||
ccs *balancer.ClientConnState
|
||||
}
|
||||
|
||||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
err error
|
||||
}
|
||||
|
||||
type exitIdleUpdate struct{}
|
||||
|
||||
type resolverErrorUpdate struct {
|
||||
err error
|
||||
}
|
||||
|
||||
type switchToUpdate struct {
|
||||
name string
|
||||
}
|
||||
|
||||
type subConnUpdate struct {
|
||||
acbw *acBalancerWrapper
|
||||
}
|
||||
|
||||
// watcher is a long-running goroutine which reads updates from a channel and
|
||||
// invokes corresponding methods on the underlying balancer. It ensures that
|
||||
// these methods are invoked in a synchronous fashion. It also ensures that
|
||||
// these methods are invoked in the order in which the updates were received.
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case u := <-ccb.updateCh.Get():
|
||||
ccb.updateCh.Load()
|
||||
if ccb.closed.HasFired() {
|
||||
break
|
||||
}
|
||||
switch update := u.(type) {
|
||||
case *ccStateUpdate:
|
||||
ccb.handleClientConnStateChange(update.ccs)
|
||||
case *scStateUpdate:
|
||||
ccb.handleSubConnStateChange(update)
|
||||
case *exitIdleUpdate:
|
||||
ccb.handleExitIdle()
|
||||
case *resolverErrorUpdate:
|
||||
ccb.handleResolverError(update.err)
|
||||
case *switchToUpdate:
|
||||
ccb.handleSwitchTo(update.name)
|
||||
case *subConnUpdate:
|
||||
ccb.handleRemoveSubConn(update.acbw)
|
||||
default:
|
||||
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
|
||||
}
|
||||
case <-ccb.closed.Done():
|
||||
}
|
||||
|
||||
if ccb.closed.HasFired() {
|
||||
ccb.handleClose()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
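The removed watcher() above is the classic single-consumer serialization pattern: producers enqueue typed updates, and one goroutine drains them so the wrapped balancer only ever sees ordered, non-concurrent calls. A minimal, self-contained sketch of that pattern follows; all names in it are made up for illustration and are not grpc internals.

package main

import "fmt"

// stateUpdate and errorUpdate mirror the xxxUpdate structs above: one type per
// kind of event the watcher knows how to dispatch.
type stateUpdate struct{ state string }
type errorUpdate struct{ err error }

type wrapper struct {
	updates chan any      // producers enqueue typed updates here
	done    chan struct{} // closed once the watcher has drained and exited
}

func newWrapper() *wrapper {
	w := &wrapper{updates: make(chan any, 16), done: make(chan struct{})}
	go w.watcher()
	return w
}

// watcher drains the queue on a single goroutine, so the handlers never run
// concurrently and always run in enqueue order.
func (w *wrapper) watcher() {
	defer close(w.done)
	for u := range w.updates {
		switch u := u.(type) {
		case *stateUpdate:
			fmt.Println("handle state:", u.state)
		case *errorUpdate:
			fmt.Println("handle error:", u.err)
		default:
			fmt.Printf("unknown update %T\n", u)
		}
	}
}

func main() {
	w := newWrapper()
	w.updates <- &stateUpdate{state: "READY"}
	w.updates <- &errorUpdate{err: fmt.Errorf("resolver error")}
	close(w.updates) // stop: the watcher drains what is queued, then exits
	<-w.done
}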
|
||||
|
||||
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
||||
// the underlying balancer.
|
||||
//
|
||||
// Unlike other methods invoked by grpc to push updates to the underlying
|
||||
// balancer, this method cannot simply push the update onto the update channel
|
||||
// and return. It needs to return the error returned by the underlying balancer
|
||||
// back to grpc which propagates that to the resolver.
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
|
||||
ccb.mu.Lock()
|
||||
errCh := make(chan error, 1)
|
||||
// Here and everywhere else where Schedule() is called, it is done with the
|
||||
// lock held. But the lock guards only the scheduling part. The actual
|
||||
// callback is called asynchronously without the lock being held.
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
||||
})
|
||||
if !ok {
|
||||
// If we are unable to schedule a function with the serializer, it
|
||||
// indicates that it has been closed. A serializer is only closed when
|
||||
// the wrapper is closed or is in idle.
|
||||
ccb.mu.Unlock()
|
||||
return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
var res interface{}
|
||||
select {
|
||||
case res = <-ccb.resultCh.Get():
|
||||
ccb.resultCh.Load()
|
||||
case <-ccb.closed.Done():
|
||||
// Return early if the balancer wrapper is closed while we are waiting for
|
||||
// the underlying balancer to process a ClientConnState update.
|
||||
return nil
|
||||
// We get here only if the above call to Schedule succeeds, in which case it
|
||||
// is guaranteed that the scheduled function will run. Therefore it is safe
|
||||
// to block on this channel.
|
||||
err := <-errCh
|
||||
if logger.V(2) && err != nil {
|
||||
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
|
||||
}
|
||||
// If the returned error is nil, attempting to type assert to error leads to
|
||||
// panic. So, this needs to be handled separately.
|
||||
if res == nil {
|
||||
return nil
|
||||
}
|
||||
return res.(error)
|
||||
}
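updateClientConnState is the one path that must hand the balancer's error back to the caller even though the work runs on the serializer, hence the schedule-then-block-on-a-one-element-channel shape. A reduced sketch of that shape, using a hypothetical single-goroutine executor rather than grpc's grpcsync.CallbackSerializer:

package main

import (
	"errors"
	"fmt"
)

// executor runs scheduled callbacks one at a time on a single goroutine; it is
// an illustrative stand-in for a callback serializer, not the grpc type.
type executor struct{ work chan func() }

func newExecutor() *executor {
	e := &executor{work: make(chan func(), 16)}
	go func() {
		for f := range e.work {
			f()
		}
	}()
	return e
}

// schedule reports whether the callback was accepted; a closed executor would
// return false (closing is not modeled in this sketch).
func (e *executor) schedule(f func()) bool {
	e.work <- f
	return true
}

// applyConfig mirrors updateClientConnState: run the update on the executor,
// but block the caller until the update's error is available.
func applyConfig(e *executor, update func() error) error {
	errCh := make(chan error, 1)
	if !e.schedule(func() { errCh <- update() }) {
		return errors.New("cannot send state update to a closed executor")
	}
	return <-errCh
}

func main() {
	e := newExecutor()
	err := applyConfig(e, func() error { return errors.New("balancer rejected the config") })
	fmt.Println("caller sees:", err)
}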
|
||||
|
||||
// handleClientConnStateChange handles a ClientConnState update from the update
|
||||
// channel and invokes the appropriate method on the underlying balancer.
|
||||
//
|
||||
// If the addresses specified in the update contain addresses of type "grpclb"
|
||||
// and the selected LB policy is not "grpclb", these addresses will be filtered
|
||||
// out and ccs will be modified with the updated address list.
|
||||
func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
|
||||
if ccb.curBalancerName != grpclbName {
|
||||
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||
var addrs []resolver.Address
|
||||
for _, addr := range ccs.ResolverState.Addresses {
|
||||
if addr.Type == resolver.GRPCLB {
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
ccs.ResolverState.Addresses = addrs
|
||||
}
|
||||
ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
|
||||
return err
|
||||
}
|
||||
|
||||
// updateSubConnState is invoked by grpc to push a subConn state update to the
|
||||
// underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
// don't want the balancer to receive this state change. So before
|
||||
// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
|
||||
// this function will be called with (nil, Shutdown). We don't need to call
|
||||
// balancer method in this case.
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.updateCh.Put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
err: err,
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// Even though it is optional for balancers, gracefulswitch ensures
|
||||
// opts.StateListener is set, so this cannot ever be nil.
|
||||
sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
||||
})
|
||||
}
|
||||
|
||||
// handleSubConnStateChange handles a SubConnState update from the update
|
||||
// channel and invokes the appropriate method on the underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
|
||||
ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) exitIdle() {
|
||||
ccb.updateCh.Put(&exitIdleUpdate{})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleExitIdle() {
|
||||
if ccb.cc.GetState() != connectivity.Idle {
|
||||
return
|
||||
}
|
||||
ccb.balancer.ExitIdle()
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
ccb.updateCh.Put(&resolverErrorUpdate{err: err})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
||||
ccb.balancer.ResolverError(err)
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
ccb.balancer.ResolverError(err)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
|
||||
@ -245,24 +151,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
||||
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
|
||||
// the graceful balancer switching process if the name does not change.
|
||||
func (ccb *ccBalancerWrapper) switchTo(name string) {
|
||||
ccb.updateCh.Put(&switchToUpdate{name: name})
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// TODO: Other languages use case-sensitive balancer registries. We should
|
||||
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
||||
if strings.EqualFold(ccb.curBalancerName, name) {
|
||||
return
|
||||
}
|
||||
ccb.buildLoadBalancingPolicy(name)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// handleSwitchTo handles a balancer switch update from the update channel. It
|
||||
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
|
||||
// balancer.Builder corresponding to name. If no balancer.Builder is registered
|
||||
// for the given name, it uses the default LB policy which is "pick_first".
|
||||
func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
||||
// TODO: Other languages use case-insensitive balancer registries. We should
|
||||
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
||||
if strings.EqualFold(ccb.curBalancerName, name) {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Ensure that name is a registered LB policy when we get here.
|
||||
// We currently only validate the `loadBalancingConfig` field. We need to do
|
||||
// the same for the `loadBalancingPolicy` field and reject the service config
|
||||
// if the specified policy is not registered.
|
||||
// buildLoadBalancingPolicy performs the following:
|
||||
// - retrieve a balancer builder for the given name. Use the default LB
|
||||
// policy, pick_first, if no LB policy with name is found in the registry.
|
||||
// - instruct the gracefulswitch balancer to switch to the above builder. This
|
||||
// will actually build the new balancer.
|
||||
// - update the `curBalancerName` field
|
||||
//
|
||||
// Must be called from a serializer callback.
|
||||
func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
|
||||
@ -278,26 +187,112 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
||||
ccb.curBalancerName = builder.Name()
|
||||
}
|
||||
|
||||
// handleRemoveSubConn handles a request from the underlying balancer to remove
|
||||
// a subConn.
|
||||
//
|
||||
// See comments in RemoveSubConn() for more details.
|
||||
func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
ccb.closed.Fire()
|
||||
<-ccb.done.Done()
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
|
||||
ccb.closeBalancer(ccbModeClosed)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleClose() {
|
||||
ccb.balancer.Close()
|
||||
ccb.done.Fire()
|
||||
// enterIdleMode is invoked by grpc when the channel enters idle mode upon
|
||||
// expiry of idle_timeout. This call blocks until the balancer is closed.
|
||||
func (ccb *ccBalancerWrapper) enterIdleMode() {
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
|
||||
ccb.closeBalancer(ccbModeIdle)
|
||||
}
|
||||
|
||||
// closeBalancer is invoked when the channel is being closed or when it enters
|
||||
// idle mode upon expiry of idle_timeout.
|
||||
func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
ccb.mode = m
|
||||
done := ccb.serializer.Done()
|
||||
b := ccb.balancer
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// Close the serializer to ensure that no more calls from gRPC are sent
|
||||
// to the balancer.
|
||||
ccb.serializerCancel()
|
||||
// Empty the current balancer name because we don't have a balancer
|
||||
// anymore and also so that we act on the next call to switchTo by
|
||||
// creating a new balancer specified by the new resolver.
|
||||
ccb.curBalancerName = ""
|
||||
})
|
||||
if !ok {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish before closing the balancer.
|
||||
<-done
|
||||
b.Close()
|
||||
}
|
||||
|
||||
// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
||||
// because of an RPC or because of an invocation of the Connect() API. This
|
||||
// recreates the balancer that was closed previously when entering idle mode.
|
||||
//
|
||||
// If the channel is not in idle mode, we know for a fact that we are here as a
|
||||
// result of the user calling the Connect() method on the ClientConn. In this
|
||||
// case, we can simply forward the call to the underlying balancer, instructing
|
||||
// it to reconnect to the backends.
|
||||
func (ccb *ccBalancerWrapper) exitIdleMode() {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed {
|
||||
// Request to exit idle is a no-op when wrapper is already closed.
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if ccb.mode == ccbModeIdle {
|
||||
// Recreate the serializer which was closed when we entered idle.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
|
||||
ccb.serializerCancel = cancel
|
||||
}
|
||||
|
||||
// The ClientConn guarantees mutual exclusion between close() and
|
||||
// exitIdleMode(), and since we just created a new serializer, we can be
|
||||
// sure that the below function will be scheduled.
|
||||
done := make(chan struct{})
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
defer close(done)
|
||||
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
|
||||
if ccb.mode != ccbModeIdle {
|
||||
ccb.balancer.ExitIdle()
|
||||
return
|
||||
}
|
||||
|
||||
// Gracefulswitch balancer does not support a switchTo operation after
|
||||
// being closed. Hence we need to create a new one here.
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
|
||||
ccb.mode = ccbModeActive
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
|
||||
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
if len(addrs) <= 0 {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
}
|
||||
ac, err := ccb.cc.newAddrConn(addrs, opts)
|
||||
@ -305,32 +300,26 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
||||
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
acbw := &acBalancerWrapper{ac: ac}
|
||||
acbw.ac.mu.Lock()
|
||||
acbw := &acBalancerWrapper{
|
||||
ccb: ccb,
|
||||
ac: ac,
|
||||
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
|
||||
stateListener: opts.StateListener,
|
||||
}
|
||||
ac.acbw = acbw
|
||||
acbw.ac.mu.Unlock()
|
||||
return acbw, nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
|
||||
// was required to handle the RemoveSubConn() method asynchronously by pushing
|
||||
// the update onto the update channel. This was done to avoid a deadlock as
|
||||
// switchBalancer() was holding cc.mu when calling Close() on the old
|
||||
// balancer, which would in turn call RemoveSubConn().
|
||||
//
|
||||
// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
|
||||
// asynchronously is probably not required anymore since the switchTo() method
|
||||
// handles the balancer switch by pushing the update onto the channel.
|
||||
// TODO(easwars): Handle this inline.
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
|
||||
// The graceful switch balancer will never call this.
|
||||
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
@ -339,6 +328,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
@ -349,6 +342,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
@ -359,58 +356,99 @@ func (ccb *ccBalancerWrapper) Target() string {
|
||||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||
// It implements balancer.SubConn interface.
|
||||
type acBalancerWrapper struct {
|
||||
mu sync.Mutex
|
||||
ac *addrConn
|
||||
ac *addrConn // read-only
|
||||
ccb *ccBalancerWrapper // read-only
|
||||
stateListener func(balancer.SubConnState)
|
||||
|
||||
mu sync.Mutex
|
||||
producers map[balancer.ProducerBuilder]*refCountedProducer
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) String() string {
|
||||
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
cc := acbw.ac.cc
|
||||
opts := acbw.ac.scopts
|
||||
acbw.ac.mu.Lock()
|
||||
// Set old ac.acbw to nil so the Shutdown state update will be ignored
|
||||
// by balancer.
|
||||
//
|
||||
// TODO(bar) the state transition could be wrong when tearDown() old ac
|
||||
// and creating new ac, fix the transition.
|
||||
acbw.ac.acbw = nil
|
||||
acbw.ac.mu.Unlock()
|
||||
acState := acbw.ac.getState()
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
|
||||
if acState == connectivity.Shutdown {
|
||||
return
|
||||
}
|
||||
|
||||
newAC, err := cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = newAC
|
||||
newAC.mu.Lock()
|
||||
newAC.acbw = acbw
|
||||
newAC.mu.Unlock()
|
||||
if acState != connectivity.Idle {
|
||||
go newAC.connect()
|
||||
}
|
||||
}
|
||||
acbw.ac.updateAddrs(addrs)
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) Connect() {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
go acbw.ac.connect()
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||||
func (acbw *acBalancerWrapper) Shutdown() {
|
||||
ccb := acbw.ccb
|
||||
if ccb.isIdleOrClosed() {
|
||||
// It is safe to ignore this call when the balancer is closed or in idle
|
||||
// because the ClientConn takes care of closing the connections.
|
||||
//
|
||||
// Not returning early from here when the balancer is closed or in idle
|
||||
// leads to a deadlock though, because of the following sequence of
|
||||
// calls when holding cc.mu:
|
||||
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
|
||||
// ccb.RemoveAddrConn --> cc.removeAddrConn
|
||||
return
|
||||
}
|
||||
|
||||
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
}
|
||||
|
||||
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
|
||||
// ready, blocks until it is or ctx expires. Returns an error when the context
|
||||
// expires or the addrConn is shut down.
|
||||
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||
transport, err := acbw.ac.getTransport(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
|
||||
}
|
||||
|
||||
// Invoke performs a unary RPC. If the addrConn is not ready, returns
|
||||
// errSubConnNotReady.
|
||||
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
|
||||
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cs.SendMsg(args); err != nil {
|
||||
return err
|
||||
}
|
||||
return cs.RecvMsg(reply)
|
||||
}
|
||||
|
||||
type refCountedProducer struct {
|
||||
producer balancer.Producer
|
||||
refs int // number of current refs to the producer
|
||||
close func() // underlying producer's close function
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
return acbw.ac
|
||||
|
||||
// Look up existing producer from this builder.
|
||||
pData := acbw.producers[pb]
|
||||
if pData == nil {
|
||||
// Not found; create a new one and add it to the producers map.
|
||||
p, close := pb.Build(acbw)
|
||||
pData = &refCountedProducer{producer: p, close: close}
|
||||
acbw.producers[pb] = pData
|
||||
}
|
||||
// Account for this new reference.
|
||||
pData.refs++
|
||||
|
||||
// Return a cleanup function wrapped in a OnceFunc to remove this reference
|
||||
// and delete the refCountedProducer from the map if the total reference
|
||||
// count goes to zero.
|
||||
unref := func() {
|
||||
acbw.mu.Lock()
|
||||
pData.refs--
|
||||
if pData.refs == 0 {
|
||||
defer pData.close() // Run outside the acbw mutex
|
||||
delete(acbw.producers, pb)
|
||||
}
|
||||
acbw.mu.Unlock()
|
||||
}
|
||||
return pData.producer, grpcsync.OnceFunc(unref)
|
||||
}
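GetOrBuildProducer keeps one producer per builder and hands every caller an idempotent release function; when the last reference is dropped the entry is removed and rebuilt on the next request. The same shape, reduced to a self-contained sketch with made-up names (not the grpc types):

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	value string
	refs  int
}

type cache struct {
	mu sync.Mutex
	m  map[string]*entry
}

// acquire returns the cached value for key (building it on first use) and a
// release function; the entry is dropped when the last holder releases it.
func (c *cache) acquire(key string) (string, func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := c.m[key]
	if e == nil {
		e = &entry{value: "built:" + key}
		c.m[key] = e
	}
	e.refs++
	var once sync.Once
	release := func() {
		once.Do(func() {
			c.mu.Lock()
			defer c.mu.Unlock()
			e.refs--
			if e.refs == 0 {
				delete(c.m, key)
			}
		})
	}
	return e.value, release
}

func main() {
	c := &cache{m: make(map[string]*entry)}
	v, release := c.acquire("producer")
	fmt.Println(v)
	release()
	release() // safe: sync.Once makes release idempotent, like grpcsync.OnceFunc
}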
|
||||
|
22
src/runtime/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
generated
vendored
@ -18,14 +18,13 @@
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/binlog/v1/binarylog.proto
|
||||
|
||||
package grpc_binarylog_v1
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
@ -41,10 +40,6 @@ const (
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// Enumerates the type of event
|
||||
// Note the terminology is different from the RPC semantics
|
||||
// definition, but the same meaning is expressed here.
|
||||
@ -261,6 +256,7 @@ type GrpcLogEntry struct {
|
||||
// according to the type of the log entry.
|
||||
//
|
||||
// Types that are assignable to Payload:
|
||||
//
|
||||
// *GrpcLogEntry_ClientHeader
|
||||
// *GrpcLogEntry_ServerHeader
|
||||
// *GrpcLogEntry_Message
|
||||
@ -694,12 +690,12 @@ func (x *Message) GetData() []byte {
|
||||
// Header keys added by gRPC are omitted. To be more specific,
|
||||
// implementations will not log the following entries, and this is
|
||||
// not to be treated as a truncation:
|
||||
// - entries handled by grpc that are not user visible, such as those
|
||||
// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||||
// or keys like 'lb-token'
|
||||
// - transport specific entries, including but not limited to:
|
||||
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||||
// - entries added for call credentials
|
||||
// - entries handled by grpc that are not user visible, such as those
|
||||
// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||||
// or keys like 'lb-token'
|
||||
// - transport specific entries, including but not limited to:
|
||||
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||||
// - entries added for call credentials
|
||||
//
|
||||
// Implementations must always log grpc-trace-bin if it is present.
|
||||
// Practically speaking it will only be visible on server side because
|
||||
|
6
src/runtime/vendor/google.golang.org/grpc/call.go
generated
vendored
@ -26,7 +26,7 @@ import (
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// All errors returned by Invoke are compatible with the status package.
|
||||
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
|
||||
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption {
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.Invoke instead.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
|
||||
return cc.Invoke(ctx, method, args, reply, opts...)
|
||||
}
|
||||
|
||||
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
|
||||
|
||||
func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
|
||||
cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
|
2
src/runtime/vendor/google.golang.org/grpc/channelz/channelz.go
generated
vendored
@ -23,7 +23,7 @@
|
||||
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
|
||||
// the `internal/channelz` package.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: All APIs in this package are experimental and may be removed in a
|
||||
// later release.
|
||||
|
1011
src/runtime/vendor/google.golang.org/grpc/clientconn.go
generated
vendored
File diff suppressed because it is too large
8
src/runtime/vendor/google.golang.org/grpc/codec.go
generated
vendored
@ -27,8 +27,8 @@ import (
|
||||
// omits the name/string, which vary between the two and are not needed for
|
||||
// anything besides the registry in the encoding package.
|
||||
type baseCodec interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Marshal(v any) ([]byte, error)
|
||||
Unmarshal(data []byte, v any) error
|
||||
}
|
||||
|
||||
var _ baseCodec = Codec(nil)
|
||||
@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil)
|
||||
// Deprecated: use encoding.Codec instead.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Marshal(v any) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Unmarshal(data []byte, v any) error
|
||||
// String returns the name of the Codec implementation. This is unused by
|
||||
// gRPC.
|
||||
String() string
|
||||
|
51
src/runtime/vendor/google.golang.org/grpc/codes/code_string.go
generated
vendored
@ -18,7 +18,15 @@
|
||||
|
||||
package codes
|
||||
|
||||
import "strconv"
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.CanonicalString = canonicalString
|
||||
}
|
||||
|
||||
func (c Code) String() string {
|
||||
switch c {
|
||||
@ -60,3 +68,44 @@ func (c Code) String() string {
|
||||
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
}
|
||||
|
||||
func canonicalString(c Code) string {
|
||||
switch c {
|
||||
case OK:
|
||||
return "OK"
|
||||
case Canceled:
|
||||
return "CANCELLED"
|
||||
case Unknown:
|
||||
return "UNKNOWN"
|
||||
case InvalidArgument:
|
||||
return "INVALID_ARGUMENT"
|
||||
case DeadlineExceeded:
|
||||
return "DEADLINE_EXCEEDED"
|
||||
case NotFound:
|
||||
return "NOT_FOUND"
|
||||
case AlreadyExists:
|
||||
return "ALREADY_EXISTS"
|
||||
case PermissionDenied:
|
||||
return "PERMISSION_DENIED"
|
||||
case ResourceExhausted:
|
||||
return "RESOURCE_EXHAUSTED"
|
||||
case FailedPrecondition:
|
||||
return "FAILED_PRECONDITION"
|
||||
case Aborted:
|
||||
return "ABORTED"
|
||||
case OutOfRange:
|
||||
return "OUT_OF_RANGE"
|
||||
case Unimplemented:
|
||||
return "UNIMPLEMENTED"
|
||||
case Internal:
|
||||
return "INTERNAL"
|
||||
case Unavailable:
|
||||
return "UNAVAILABLE"
|
||||
case DataLoss:
|
||||
return "DATA_LOSS"
|
||||
case Unauthenticated:
|
||||
return "UNAUTHENTICATED"
|
||||
default:
|
||||
return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
}
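The new canonicalString is unexported and is reached only through the internal.CanonicalString hook set in init() above, while the exported String() keeps the Go-style spellings. A small illustration of the two spellings:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	// The exported String() keeps the Go-style name...
	fmt.Println(codes.NotFound.String()) // NotFound
	// ...whereas canonicalString above maps the same code to the gRPC spec
	// spelling "NOT_FOUND" for consumers wired through the internal hook.
}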
|
||||
|
20
src/runtime/vendor/google.golang.org/grpc/credentials/credentials.go
generated
vendored
@ -36,16 +36,16 @@ import (
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
// attach security information to every RPC (e.g., oauth2).
|
||||
type PerRPCCredentials interface {
|
||||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation. Additionally, RequestInfo data will be
|
||||
// available via ctx to this call.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
// GetRequestMetadata gets the current request metadata, refreshing tokens
|
||||
// if required. This should be called by the transport layer on each
|
||||
// request, and the data should be populated in headers or other
|
||||
// context. If a status code is returned, it will be used as the status for
|
||||
// the RPC (restricted to an allowable set of codes as defined by gRFC
|
||||
// A54). uri is the URI of the entry point for the request. When supported
|
||||
// by the underlying implementation, ctx can be used for timeout and
|
||||
// cancellation. Additionally, RequestInfo data will be available via ctx
|
||||
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
|
||||
// of leaving it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
// RequireTransportSecurity indicates whether the credentials requires
|
||||
// transport security.
|
||||
|
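The PerRPCCredentials contract described above is small enough to sketch. A minimal implementation assuming a static bearer token; the type name and token are made up for illustration:

package main

import (
	"context"
	"fmt"
)

// staticTokenCreds satisfies credentials.PerRPCCredentials by attaching a
// fixed bearer token to every RPC.
type staticTokenCreds struct{ token string }

// GetRequestMetadata is called by the transport layer on each request.
func (c staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity returns true so the token is only ever sent over a
// secure connection.
func (c staticTokenCreds) RequireTransportSecurity() bool { return true }

func main() {
	md, _ := staticTokenCreds{token: "example-token"}.GetRequestMetadata(context.Background())
	fmt.Println(md["authorization"])
}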
6
src/runtime/vendor/google.golang.org/grpc/credentials/tls.go
generated
vendored
@ -23,9 +23,9 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
credinternal "google.golang.org/grpc/internal/credentials"
|
||||
)
|
||||
@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
|
||||
// it will override the virtual host name of authority (e.g. :authority header
|
||||
// field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
b, err := os.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
152
src/runtime/vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
@ -29,12 +29,25 @@ import (
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/internal"
|
||||
internalbackoff "google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/binarylog"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.AddGlobalDialOptions = func(opt ...DialOption) {
|
||||
globalDialOptions = append(globalDialOptions, opt...)
|
||||
}
|
||||
internal.ClearGlobalDialOptions = func() {
|
||||
globalDialOptions = nil
|
||||
}
|
||||
internal.WithBinaryLogger = withBinaryLogger
|
||||
internal.JoinDialOptions = newJoinDialOption
|
||||
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
|
||||
}
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
@ -52,6 +65,7 @@ type dialOptions struct {
|
||||
timeout time.Duration
|
||||
scChan <-chan ServiceConfig
|
||||
authority string
|
||||
binaryLogger binarylog.Logger
|
||||
copts transport.ConnectOptions
|
||||
callOptions []CallOption
|
||||
channelzParentID *channelz.Identifier
|
||||
@ -63,6 +77,8 @@ type dialOptions struct {
|
||||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||
defaultServiceConfigRawJSON *string
|
||||
resolvers []resolver.Builder
|
||||
idleTimeout time.Duration
|
||||
recvBufferPool SharedBufferPool
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
@ -70,10 +86,12 @@ type DialOption interface {
|
||||
apply(*dialOptions)
|
||||
}
|
||||
|
||||
var globalDialOptions []DialOption
|
||||
|
||||
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
||||
// another structure to build custom dial options.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -81,6 +99,16 @@ type EmptyDialOption struct{}
|
||||
|
||||
func (EmptyDialOption) apply(*dialOptions) {}
|
||||
|
||||
type disableGlobalDialOptions struct{}
|
||||
|
||||
func (disableGlobalDialOptions) apply(*dialOptions) {}
|
||||
|
||||
// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
|
||||
// from applying the global DialOptions (set via AddGlobalDialOptions).
|
||||
func newDisableGlobalDialOptions() DialOption {
|
||||
return &disableGlobalDialOptions{}
|
||||
}
|
||||
|
||||
// funcDialOption wraps a function that modifies dialOptions into an
|
||||
// implementation of the DialOption interface.
|
||||
type funcDialOption struct {
|
||||
@ -97,13 +125,42 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
|
||||
}
|
||||
}
|
||||
|
||||
type joinDialOption struct {
|
||||
opts []DialOption
|
||||
}
|
||||
|
||||
func (jdo *joinDialOption) apply(do *dialOptions) {
|
||||
for _, opt := range jdo.opts {
|
||||
opt.apply(do)
|
||||
}
|
||||
}
|
||||
|
||||
func newJoinDialOption(opts ...DialOption) DialOption {
|
||||
return &joinDialOption{opts: opts}
|
||||
}
|
||||
|
||||
// WithSharedWriteBuffer allows reusing per-connection transport write buffer.
|
||||
// If this option is set to true every connection will release the buffer after
|
||||
// flushing the data on the wire.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithSharedWriteBuffer(val bool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.SharedWriteBuffer = val
|
||||
})
|
||||
}
|
||||
|
||||
// WithWriteBufferSize determines how much data can be batched before doing a
|
||||
// write on the wire. The corresponding memory allocation for this buffer will
|
||||
// be twice the size to keep syscalls low. The default value for this buffer is
|
||||
// 32KB.
|
||||
//
|
||||
// Zero will disable the write buffer such that each write will be on underlying
|
||||
// connection. Note: A Send call may not directly translate to a write.
|
||||
// Zero or negative values will disable the write buffer such that each write
|
||||
// will be on underlying connection. Note: A Send call may not directly
|
||||
// translate to a write.
|
||||
func WithWriteBufferSize(s int) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.WriteBufferSize = s
|
||||
@ -113,8 +170,9 @@ func WithWriteBufferSize(s int) DialOption {
|
||||
// WithReadBufferSize lets you set the size of read buffer, this determines how
|
||||
// much data can be read at most for each read syscall.
|
||||
//
|
||||
// The default value for this buffer is 32KB. Zero will disable read buffer for
|
||||
// a connection so data framer can access the underlying conn directly.
|
||||
// The default value for this buffer is 32KB. Zero or negative values will
|
||||
// disable read buffer for a connection so data framer can access the
|
||||
// underlying conn directly.
|
||||
func WithReadBufferSize(s int) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.ReadBufferSize = s
|
||||
@ -253,6 +311,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
|
||||
// WithBlock returns a DialOption which makes callers of Dial block until the
|
||||
// underlying connection is up. Without this, Dial returns immediately and
|
||||
// connecting the server happens in background.
|
||||
//
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
func WithBlock() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
@ -264,7 +325,10 @@ func WithBlock() DialOption {
|
||||
// the context.DeadlineExceeded error.
|
||||
// Implies WithBlock()
|
||||
//
|
||||
// Experimental
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -293,7 +357,7 @@ func WithInsecure() DialOption {
|
||||
// WithNoProxy returns a DialOption which disables the use of proxies for this
|
||||
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -324,7 +388,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
||||
// the ClientConn.WithCreds. This should not be used together with
|
||||
// WithTransportCredentials.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -380,7 +444,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
|
||||
// all the RPCs and underlying network connections in this ClientConn.
|
||||
func WithStatsHandler(h stats.Handler) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.StatsHandler = h
|
||||
if h == nil {
|
||||
logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
|
||||
// Do not allow a nil stats handler, which would otherwise cause
|
||||
// panics.
|
||||
return
|
||||
}
|
||||
o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
|
||||
})
|
||||
}
|
||||
|
||||
// withBinaryLogger returns a DialOption that specifies the binary logger for
|
||||
// this ClientConn.
|
||||
func withBinaryLogger(bl binarylog.Logger) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.binaryLogger = bl
|
||||
})
|
||||
}
|
||||
|
||||
@ -392,7 +470,10 @@ func WithStatsHandler(h stats.Handler) DialOption {
|
||||
// FailOnNonTempDialError only affects the initial dial, and does not do
|
||||
// anything useful unless you are also using WithBlock().
|
||||
//
|
||||
// Experimental
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -472,7 +553,7 @@ func WithAuthority(a string) DialOption {
|
||||
// current ClientConn's parent. This function is used in nested channel creation
|
||||
// (e.g. grpclb dial).
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -517,9 +598,6 @@ func WithDefaultServiceConfig(s string) DialOption {
|
||||
// service config enables them. This does not impact transparent retries, which
|
||||
// will happen automatically if no data is written to the wire or if the RPC is
|
||||
// unprocessed by the remote server.
|
||||
//
|
||||
// Retry support is currently enabled by default, but may be disabled by
|
||||
// setting the environment variable "GRPC_GO_RETRY" to "off".
|
||||
func WithDisableRetry() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.disableRetry = true
|
||||
@ -537,7 +615,7 @@ func WithMaxHeaderListSize(s uint32) DialOption {
|
||||
// WithDisableHealthCheck disables the LB channel health checking for all
|
||||
// SubConns of this ClientConn.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -565,6 +643,7 @@ func defaultDialOptions() dialOptions {
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
UseProxy: true,
|
||||
},
|
||||
recvBufferPool: nopBufferPool{},
|
||||
}
|
||||
}
|
||||
|
||||
@ -584,7 +663,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
|
||||
// resolver.Register. They will be matched against the scheme used for the
|
||||
// current Dial only, and will take precedence over the global registry.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -593,3 +672,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
|
||||
o.resolvers = append(o.resolvers, rs...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithIdleTimeout returns a DialOption that configures an idle timeout for the
|
||||
// channel. If the channel is idle for the configured timeout, i.e there are no
|
||||
// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
|
||||
// and as a result the name resolver and load balancer will be shut down. The
|
||||
// channel will exit idle mode when the Connect() method is called or when an
|
||||
// RPC is initiated.
|
||||
//
|
||||
// By default this feature is disabled, which can also be explicitly configured
|
||||
// by passing zero to this function.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithIdleTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.idleTimeout = d
|
||||
})
|
||||
}
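An illustrative use of the new option, assuming a local plaintext target (the address, timeout value, and error handling are made up):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The channel drops its resolver and balancer after 30 minutes without
	// RPCs and transparently exits idle on the next RPC or Connect() call.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}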
|
||||
|
||||
// WithRecvBufferPool returns a DialOption that configures the ClientConn
|
||||
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||
// on the application's workload, this could result in reduced memory allocation.
|
||||
//
|
||||
// If you are unsure about how to implement a memory pool but want to utilize one,
|
||||
// begin with grpc.NewSharedBufferPool.
|
||||
//
|
||||
// Note: The shared buffer pool feature will not be active if any of the following
|
||||
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
|
||||
// cases, the shared buffer pool will be ignored.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.recvBufferPool = bufferPool
|
||||
})
|
||||
}
|
||||
|
11
src/runtime/vendor/google.golang.org/grpc/encoding/encoding.go
generated
vendored
@ -19,7 +19,7 @@
|
||||
// Package encoding defines the interface for the compressor and codec, and
|
||||
// functions to register and retrieve compressors and codecs.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -28,6 +28,8 @@ package encoding
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
)
|
||||
|
||||
// Identity specifies the optional encoding for uncompressed streams.
|
||||
@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func RegisterCompressor(c Compressor) {
|
||||
registeredCompressor[c.Name()] = c
|
||||
if !grpcutil.IsCompressorNameRegistered(c.Name()) {
|
||||
grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
|
||||
}
|
||||
}
|
||||
|
||||
// GetCompressor returns Compressor for the given compressor name.
|
||||
@ -85,9 +90,9 @@ func GetCompressor(name string) Compressor {
|
||||
// methods can be called from concurrent goroutines.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Marshal(v any) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Unmarshal(data []byte, v any) error
|
||||
// Name returns the name of the Codec implementation. The returned string
|
||||
// will be used as part of content type in transmission. The result must be
|
||||
// static; the result cannot change between calls.
|
||||
|
4
src/runtime/vendor/google.golang.org/grpc/encoding/proto/proto.go
generated
vendored
@ -37,7 +37,7 @@ func init() {
|
||||
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type codec struct{}
|
||||
|
||||
func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||
func (codec) Marshal(v any) ([]byte, error) {
|
||||
vv, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
|
||||
@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||
return proto.Marshal(vv)
|
||||
}
|
||||
|
||||
func (codec) Unmarshal(data []byte, v interface{}) error {
|
||||
func (codec) Unmarshal(data []byte, v any) error {
|
||||
vv, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
|
||||
|
40
src/runtime/vendor/google.golang.org/grpc/grpclog/component.go
generated
vendored
@ -31,71 +31,71 @@ type componentData struct {
|
||||
|
||||
var cache = map[string]*componentData{}
|
||||
|
||||
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
func (c *componentData) InfoDepth(depth int, args ...any) {
|
||||
args = append([]any{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.InfoDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
func (c *componentData) WarningDepth(depth int, args ...any) {
|
||||
args = append([]any{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.WarningDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
func (c *componentData) ErrorDepth(depth int, args ...any) {
|
||||
args = append([]any{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.ErrorDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
func (c *componentData) FatalDepth(depth int, args ...any) {
|
||||
args = append([]any{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.FatalDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Info(args ...interface{}) {
|
||||
func (c *componentData) Info(args ...any) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warning(args ...interface{}) {
|
||||
func (c *componentData) Warning(args ...any) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Error(args ...interface{}) {
|
||||
func (c *componentData) Error(args ...any) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatal(args ...interface{}) {
|
||||
func (c *componentData) Fatal(args ...any) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Infof(format string, args ...interface{}) {
|
||||
func (c *componentData) Infof(format string, args ...any) {
|
||||
c.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Warningf(format string, args ...interface{}) {
|
||||
func (c *componentData) Warningf(format string, args ...any) {
|
||||
c.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Errorf(format string, args ...interface{}) {
|
||||
func (c *componentData) Errorf(format string, args ...any) {
|
||||
c.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalf(format string, args ...interface{}) {
|
||||
func (c *componentData) Fatalf(format string, args ...any) {
|
||||
c.FatalDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Infoln(args ...interface{}) {
|
||||
func (c *componentData) Infoln(args ...any) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warningln(args ...interface{}) {
|
||||
func (c *componentData) Warningln(args ...any) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Errorln(args ...interface{}) {
|
||||
func (c *componentData) Errorln(args ...any) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalln(args ...interface{}) {
|
||||
func (c *componentData) Fatalln(args ...any) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
|
30
src/runtime/vendor/google.golang.org/grpc/grpclog/grpclog.go
generated
vendored
@ -42,53 +42,53 @@ func V(l int) bool {
|
||||
}
|
||||
|
||||
// Info logs to the INFO log.
|
||||
func Info(args ...interface{}) {
|
||||
func Info(args ...any) {
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
func Infof(format string, args ...any) {
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
func Infoln(args ...interface{}) {
|
||||
func Infoln(args ...any) {
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING log.
|
||||
func Warning(args ...interface{}) {
|
||||
func Warning(args ...any) {
|
||||
grpclog.Logger.Warning(args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
func Warningf(format string, args ...any) {
|
||||
grpclog.Logger.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
func Warningln(args ...interface{}) {
|
||||
func Warningln(args ...any) {
|
||||
grpclog.Logger.Warningln(args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR log.
|
||||
func Error(args ...interface{}) {
|
||||
func Error(args ...any) {
|
||||
grpclog.Logger.Error(args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
func Errorf(format string, args ...any) {
|
||||
grpclog.Logger.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
func Errorln(args ...interface{}) {
|
||||
func Errorln(args ...any) {
|
||||
grpclog.Logger.Errorln(args...)
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatal(args ...interface{}) {
|
||||
func Fatal(args ...any) {
|
||||
grpclog.Logger.Fatal(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {
|
||||
|
||||
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
func Fatalf(format string, args ...any) {
|
||||
grpclog.Logger.Fatalf(format, args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {
|
||||
|
||||
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalln(args ...interface{}) {
|
||||
func Fatalln(args ...any) {
|
||||
grpclog.Logger.Fatalln(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
|
||||
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||
//
|
||||
// Deprecated: use Info.
|
||||
func Print(args ...interface{}) {
|
||||
func Print(args ...any) {
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
//
|
||||
// Deprecated: use Infof.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
func Printf(format string, args ...any) {
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
//
|
||||
// Deprecated: use Infoln.
|
||||
func Println(args ...interface{}) {
|
||||
func Println(args ...any) {
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
30
src/runtime/vendor/google.golang.org/grpc/grpclog/logger.go
generated
vendored
@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
|
||||
//
|
||||
// Deprecated: use LoggerV2.
|
||||
type Logger interface {
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Fatalln(args ...interface{})
|
||||
Print(args ...interface{})
|
||||
Printf(format string, args ...interface{})
|
||||
Println(args ...interface{})
|
||||
Fatal(args ...any)
|
||||
Fatalf(format string, args ...any)
|
||||
Fatalln(args ...any)
|
||||
Print(args ...any)
|
||||
Printf(format string, args ...any)
|
||||
Println(args ...any)
|
||||
}
|
||||
|
||||
// SetLogger sets the logger that is used in grpc. Call only from
|
||||
@ -45,39 +45,39 @@ type loggerWrapper struct {
|
||||
Logger
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Info(args ...interface{}) {
|
||||
func (g *loggerWrapper) Info(args ...any) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Infoln(args ...interface{}) {
|
||||
func (g *loggerWrapper) Infoln(args ...any) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Infof(format string, args ...interface{}) {
|
||||
func (g *loggerWrapper) Infof(format string, args ...any) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Warning(args ...interface{}) {
|
||||
func (g *loggerWrapper) Warning(args ...any) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Warningln(args ...interface{}) {
|
||||
func (g *loggerWrapper) Warningln(args ...any) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
|
||||
func (g *loggerWrapper) Warningf(format string, args ...any) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Error(args ...interface{}) {
|
||||
func (g *loggerWrapper) Error(args ...any) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Errorln(args ...interface{}) {
|
||||
func (g *loggerWrapper) Errorln(args ...any) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
|
||||
func (g *loggerWrapper) Errorf(format string, args ...any) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
|
65
src/runtime/vendor/google.golang.org/grpc/grpclog/loggerv2.go
generated
vendored
@ -22,7 +22,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
@ -34,35 +33,35 @@ import (
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
Info(args ...any)
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
Infoln(args ...any)
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
Infof(format string, args ...any)
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
Warning(args ...any)
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
Warningln(args ...any)
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
Warningf(format string, args ...any)
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
Error(args ...any)
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
Errorln(args ...any)
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
Errorf(format string, args ...any)
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
Fatal(args ...any)
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
Fatalln(args ...any)
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
Fatalf(format string, args ...any)
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
|
||||
// newLoggerV2 creates a loggerV2 to be used as default logger.
|
||||
// All logs are written to stderr.
|
||||
func newLoggerV2() LoggerV2 {
|
||||
errorW := ioutil.Discard
|
||||
warningW := ioutil.Discard
|
||||
infoW := ioutil.Discard
|
||||
errorW := io.Discard
|
||||
warningW := io.Discard
|
||||
infoW := io.Discard
|
||||
|
||||
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
|
||||
switch logLevel {
|
||||
@ -183,53 +182,53 @@ func (g *loggerT) output(severity int, s string) {
|
||||
g.m[severity].Output(2, string(b))
|
||||
}
|
||||
|
||||
func (g *loggerT) Info(args ...interface{}) {
|
||||
func (g *loggerT) Info(args ...any) {
|
||||
g.output(infoLog, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Infoln(args ...interface{}) {
|
||||
func (g *loggerT) Infoln(args ...any) {
|
||||
g.output(infoLog, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Infof(format string, args ...interface{}) {
|
||||
func (g *loggerT) Infof(format string, args ...any) {
|
||||
g.output(infoLog, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Warning(args ...interface{}) {
|
||||
func (g *loggerT) Warning(args ...any) {
|
||||
g.output(warningLog, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Warningln(args ...interface{}) {
|
||||
func (g *loggerT) Warningln(args ...any) {
|
||||
g.output(warningLog, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Warningf(format string, args ...interface{}) {
|
||||
func (g *loggerT) Warningf(format string, args ...any) {
|
||||
g.output(warningLog, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Error(args ...interface{}) {
|
||||
func (g *loggerT) Error(args ...any) {
|
||||
g.output(errorLog, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Errorln(args ...interface{}) {
|
||||
func (g *loggerT) Errorln(args ...any) {
|
||||
g.output(errorLog, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Errorf(format string, args ...interface{}) {
|
||||
func (g *loggerT) Errorf(format string, args ...any) {
|
||||
g.output(errorLog, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatal(args ...interface{}) {
|
||||
func (g *loggerT) Fatal(args ...any) {
|
||||
g.output(fatalLog, fmt.Sprint(args...))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatalln(args ...interface{}) {
|
||||
func (g *loggerT) Fatalln(args ...any) {
|
||||
g.output(fatalLog, fmt.Sprintln(args...))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatalf(format string, args ...interface{}) {
|
||||
func (g *loggerT) Fatalf(format string, args ...any) {
|
||||
g.output(fatalLog, fmt.Sprintf(format, args...))
|
||||
os.Exit(1)
|
||||
}
|
||||
@ -242,18 +241,18 @@ func (g *loggerT) V(l int) bool {
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
LoggerV2
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
InfoDepth(depth int, args ...any)
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
WarningDepth(depth int, args ...any)
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
ErrorDepth(depth int, args ...any)
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
FatalDepth(depth int, args ...any)
|
||||
}
|
||||
|
9
src/runtime/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
@ -17,14 +17,13 @@
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
@ -38,10 +37,6 @@ const (
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type HealthCheckResponse_ServingStatus int32
|
||||
|
||||
const (
|
||||
|
32
src/runtime/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
generated
vendored
@ -1,7 +1,24 @@
|
||||
// Copyright 2015 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
|
||||
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.14.0
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc v4.22.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
@ -18,6 +35,11 @@ import (
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
const (
|
||||
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
|
||||
Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
|
||||
)
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
@ -53,7 +75,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
|
||||
|
||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||
out := new(HealthCheckResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -61,7 +83,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
|
||||
}
|
||||
|
||||
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -149,7 +171,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.health.v1.Health/Check",
|
||||
FullMethod: Health_Check_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
|
||||
|
12
src/runtime/vendor/google.golang.org/grpc/interceptor.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
||||
)
|
||||
|
||||
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
|
||||
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
|
||||
type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
|
||||
|
||||
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
|
||||
// Unary interceptors can be specified as a DialOption, using
|
||||
@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{
|
||||
// defaults from the ClientConn as well as per-call options.
|
||||
//
|
||||
// The returned error must be compatible with the status package.
|
||||
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
|
||||
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
|
||||
|
||||
// Streamer is called by StreamClientInterceptor to create a ClientStream.
|
||||
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
|
||||
@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
type UnaryServerInfo struct {
|
||||
// Server is the service implementation the user provides. This is read-only.
|
||||
Server interface{}
|
||||
Server any
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
}
|
||||
@ -78,13 +78,13 @@ type UnaryServerInfo struct {
|
||||
// status package, or be one of the context errors. Otherwise, gRPC will use
|
||||
// codes.Unknown as the status code and err.Error() as the status message of the
|
||||
// RPC.
|
||||
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
|
||||
type UnaryHandler func(ctx context.Context, req any) (any, error)
|
||||
|
||||
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
|
||||
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
|
||||
// to complete the RPC.
|
||||
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
|
||||
type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
|
||||
|
||||
// StreamServerInfo consists of various information about a streaming RPC on
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
@ -101,4 +101,4 @@ type StreamServerInfo struct {
|
||||
// info contains all the information of this RPC the interceptor can operate on. And handler is the
|
||||
// service method implementation. It is the responsibility of the interceptor to invoke handler to
|
||||
// complete the RPC.
|
||||
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
||||
type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
||||
|
@ -193,13 +193,15 @@ func (gsb *Balancer) ExitIdle() {
|
||||
ei.ExitIdle()
|
||||
return
|
||||
}
|
||||
gsb.mu.Lock()
|
||||
defer gsb.mu.Unlock()
|
||||
for sc := range balToUpdate.subconns {
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateSubConnState forwards the update to the appropriate child.
|
||||
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
// updateSubConnState forwards the update to the appropriate child.
|
||||
func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
|
||||
gsb.currentMu.Lock()
|
||||
defer gsb.currentMu.Unlock()
|
||||
gsb.mu.Lock()
|
||||
@ -212,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC
|
||||
} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
|
||||
balToUpdate = gsb.balancerPending
|
||||
}
|
||||
gsb.mu.Unlock()
|
||||
if balToUpdate == nil {
|
||||
// SubConn belonged to a stale lb policy that has not yet fully closed,
|
||||
// or the balancer was already closed.
|
||||
gsb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
balToUpdate.UpdateSubConnState(sc, state)
|
||||
if state.ConnectivityState == connectivity.Shutdown {
|
||||
delete(balToUpdate.subconns, sc)
|
||||
}
|
||||
gsb.mu.Unlock()
|
||||
if cb != nil {
|
||||
cb(state)
|
||||
} else {
|
||||
balToUpdate.UpdateSubConnState(sc, state)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateSubConnState forwards the update to the appropriate child.
|
||||
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
gsb.updateSubConnState(sc, state, nil)
|
||||
}
|
||||
|
||||
// Close closes any active child balancers.
|
||||
@ -240,7 +255,7 @@ func (gsb *Balancer) Close() {
|
||||
//
|
||||
// It implements the balancer.ClientConn interface and is passed down in that
|
||||
// capacity to the wrapped balancer. It maintains a set of subConns created by
|
||||
// the wrapped balancer and calls from the latter to create/update/remove
|
||||
// the wrapped balancer and calls from the latter to create/update/shutdown
|
||||
// SubConns update this set before being forwarded to the parent ClientConn.
|
||||
// State updates from the wrapped balancer can result in invocation of the
|
||||
// graceful switch logic.
|
||||
@ -252,21 +267,10 @@ type balancerWrapper struct {
|
||||
subconns map[balancer.SubConn]bool // subconns created by this balancer
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
if state.ConnectivityState == connectivity.Shutdown {
|
||||
bw.gsb.mu.Lock()
|
||||
delete(bw.subconns, sc)
|
||||
bw.gsb.mu.Unlock()
|
||||
}
|
||||
// There is no need to protect this read with a mutex, as the write to the
|
||||
// Balancer field happens in SwitchTo, which completes before this can be
|
||||
// called.
|
||||
bw.Balancer.UpdateSubConnState(sc, state)
|
||||
}
|
||||
|
||||
// Close closes the underlying LB policy and removes the subconns it created. bw
|
||||
// must not be referenced via balancerCurrent or balancerPending in gsb when
|
||||
// called. gsb.mu must not be held. Does not panic with a nil receiver.
|
||||
// Close closes the underlying LB policy and shuts down the subconns it
|
||||
// created. bw must not be referenced via balancerCurrent or balancerPending in
|
||||
// gsb when called. gsb.mu must not be held. Does not panic with a nil
|
||||
// receiver.
|
||||
func (bw *balancerWrapper) Close() {
|
||||
// before Close is called.
|
||||
if bw == nil {
|
||||
@ -279,7 +283,7 @@ func (bw *balancerWrapper) Close() {
|
||||
bw.Balancer.Close()
|
||||
bw.gsb.mu.Lock()
|
||||
for sc := range bw.subconns {
|
||||
bw.gsb.cc.RemoveSubConn(sc)
|
||||
sc.Shutdown()
|
||||
}
|
||||
bw.gsb.mu.Unlock()
|
||||
}
|
||||
@ -333,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne
|
||||
}
|
||||
bw.gsb.mu.Unlock()
|
||||
|
||||
var sc balancer.SubConn
|
||||
oldListener := opts.StateListener
|
||||
opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
|
||||
sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bw.gsb.mu.Lock()
|
||||
if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
|
||||
bw.gsb.cc.RemoveSubConn(sc)
|
||||
sc.Shutdown()
|
||||
bw.gsb.mu.Unlock()
|
||||
return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
|
||||
}
|
||||
@ -358,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
bw.gsb.mu.Lock()
|
||||
if !bw.gsb.balancerCurrentOrPending(bw) {
|
||||
bw.gsb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
bw.gsb.mu.Unlock()
|
||||
bw.gsb.cc.RemoveSubConn(sc)
|
||||
// Note: existing third party balancers may call this, so it must remain
|
||||
// until RemoveSubConn is fully removed.
|
||||
sc.Shutdown()
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||
|
4
src/runtime/vendor/google.golang.org/grpc/internal/balancerload/load.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
// Parser converts loads from metadata into a concrete type.
|
||||
type Parser interface {
|
||||
// Parse parses loads from metadata.
|
||||
Parse(md metadata.MD) interface{}
|
||||
Parse(md metadata.MD) any
|
||||
}
|
||||
|
||||
var parser Parser
|
||||
@ -38,7 +38,7 @@ func SetParser(lr Parser) {
|
||||
}
|
||||
|
||||
// Parse calls parser.Read().
|
||||
func Parse(md metadata.MD) interface{} {
|
||||
func Parse(md metadata.MD) any {
|
||||
if parser == nil {
|
||||
return nil
|
||||
}
|
||||
|
35
src/runtime/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
generated
vendored
@ -28,8 +28,13 @@ import (
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
)
|
||||
|
||||
// Logger is the global binary logger. It can be used to get binary logger for
|
||||
// each method.
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// Logger specifies MethodLoggers for method names with a Log call that
|
||||
// takes a context.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type Logger interface {
|
||||
GetMethodLogger(methodName string) MethodLogger
|
||||
}
|
||||
@ -37,30 +42,28 @@ type Logger interface {
|
||||
// binLogger is the global binary logger for the binary. One of this should be
|
||||
// built at init time from the configuration (environment variable or flags).
|
||||
//
|
||||
// It is used to get a methodLogger for each individual method.
|
||||
// It is used to get a MethodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// SetLogger sets the binarg logger.
|
||||
// SetLogger sets the binary logger.
|
||||
//
|
||||
// Only call this at init time.
|
||||
func SetLogger(l Logger) {
|
||||
binLogger = l
|
||||
}
|
||||
|
||||
// GetLogger gets the binarg logger.
|
||||
// GetLogger gets the binary logger.
|
||||
//
|
||||
// Only call this at init time.
|
||||
func GetLogger() Logger {
|
||||
return binLogger
|
||||
}
|
||||
|
||||
// GetMethodLogger returns the methodLogger for the given methodName.
|
||||
// GetMethodLogger returns the MethodLogger for the given methodName.
|
||||
//
|
||||
// methodName should be in the format of "/service/method".
|
||||
//
|
||||
// Each methodLogger returned by this method is a new instance. This is to
|
||||
// Each MethodLogger returned by this method is a new instance. This is to
|
||||
// generate sequence id within the call.
|
||||
func GetMethodLogger(methodName string) MethodLogger {
|
||||
if binLogger == nil {
|
||||
@ -117,7 +120,7 @@ func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
|
||||
|
||||
// Set method logger for "service/*".
|
||||
//
|
||||
// New methodLogger with same service overrides the old one.
|
||||
// New MethodLogger with same service overrides the old one.
|
||||
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
|
||||
if _, ok := l.config.Services[service]; ok {
|
||||
return fmt.Errorf("conflicting service rules for service %v found", service)
|
||||
@ -131,7 +134,7 @@ func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig)
|
||||
|
||||
// Set method logger for "service/method".
|
||||
//
|
||||
// New methodLogger with same method overrides the old one.
|
||||
// New MethodLogger with same method overrides the old one.
|
||||
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
|
||||
if _, ok := l.config.Blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
@ -161,11 +164,11 @@ func (l *logger) setBlacklist(method string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getMethodLogger returns the methodLogger for the given methodName.
|
||||
// getMethodLogger returns the MethodLogger for the given methodName.
|
||||
//
|
||||
// methodName should be in the format of "/service/method".
|
||||
//
|
||||
// Each methodLogger returned by this method is a new instance. This is to
|
||||
// Each MethodLogger returned by this method is a new instance. This is to
|
||||
// generate sequence id within the call.
|
||||
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
|
||||
s, m, err := grpcutil.ParseMethod(methodName)
|
||||
@ -174,16 +177,16 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger {
|
||||
return nil
|
||||
}
|
||||
if ml, ok := l.config.Methods[s+"/"+m]; ok {
|
||||
return newMethodLogger(ml.Header, ml.Message)
|
||||
return NewTruncatingMethodLogger(ml.Header, ml.Message)
|
||||
}
|
||||
if _, ok := l.config.Blacklist[s+"/"+m]; ok {
|
||||
return nil
|
||||
}
|
||||
if ml, ok := l.config.Services[s]; ok {
|
||||
return newMethodLogger(ml.Header, ml.Message)
|
||||
return NewTruncatingMethodLogger(ml.Header, ml.Message)
|
||||
}
|
||||
if l.config.All == nil {
|
||||
return nil
|
||||
}
|
||||
return newMethodLogger(l.config.All.Header, l.config.All.Message)
|
||||
return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
|
||||
}
|
||||
|
20
src/runtime/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
generated
vendored
@ -30,15 +30,15 @@ import (
|
||||
// to build a new logger and assign it to binarylog.Logger.
|
||||
//
|
||||
// Example filter config strings:
|
||||
// - "" Nothing will be logged
|
||||
// - "*" All headers and messages will be fully logged.
|
||||
// - "*{h}" Only headers will be logged.
|
||||
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||||
// - "Foo/*" Logs every method in service Foo
|
||||
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||||
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||||
// /Foo/Bar, logs all headers and messages in every other method in service
|
||||
// Foo.
|
||||
// - "" Nothing will be logged
|
||||
// - "*" All headers and messages will be fully logged.
|
||||
// - "*{h}" Only headers will be logged.
|
||||
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||||
// - "Foo/*" Logs every method in service Foo
|
||||
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||||
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||||
// /Foo/Bar, logs all headers and messages in every other method in service
|
||||
// Foo.
|
||||
//
|
||||
// If two configs exist for one certain method or service, the one specified
|
||||
// later overrides the previous config.
|
||||
@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
|
||||
// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
|
||||
// it to the right map in the logger.
|
||||
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
|
||||
// "" is invalid.
|
||||
|
157
src/runtime/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
generated
vendored
@ -19,6 +19,7 @@
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
@ -26,7 +27,7 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@ -48,11 +49,16 @@ func (g *callIDGenerator) reset() {
|
||||
var idGen callIDGenerator
|
||||
|
||||
// MethodLogger is the sub-logger for each method.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type MethodLogger interface {
|
||||
Log(LogEntryConfig)
|
||||
Log(context.Context, LogEntryConfig)
|
||||
}
|
||||
|
||||
type methodLogger struct {
|
||||
// TruncatingMethodLogger is a method logger that truncates headers and messages
|
||||
// based on configured fields.
|
||||
type TruncatingMethodLogger struct {
|
||||
headerMaxLen, messageMaxLen uint64
|
||||
|
||||
callID uint64
|
||||
@ -61,8 +67,12 @@ type methodLogger struct {
|
||||
sink Sink // TODO(blog): make this plugable.
|
||||
}
|
||||
|
||||
func newMethodLogger(h, m uint64) *methodLogger {
|
||||
return &methodLogger{
|
||||
// NewTruncatingMethodLogger returns a new truncating method logger.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||||
return &TruncatingMethodLogger{
|
||||
headerMaxLen: h,
|
||||
messageMaxLen: m,
|
||||
|
||||
@ -75,8 +85,8 @@ func newMethodLogger(h, m uint64) *methodLogger {
|
||||
|
||||
// Build is an internal only method for building the proto message out of the
|
||||
// input event. It's made public to enable other library to reuse as much logic
|
||||
// in methodLogger as possible.
|
||||
func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
||||
// in TruncatingMethodLogger as possible.
|
||||
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
|
||||
m := c.toProto()
|
||||
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||||
m.Timestamp = timestamp
|
||||
@ -84,22 +94,22 @@ func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
||||
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||||
|
||||
switch pay := m.Payload.(type) {
|
||||
case *pb.GrpcLogEntry_ClientHeader:
|
||||
case *binlogpb.GrpcLogEntry_ClientHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_ServerHeader:
|
||||
case *binlogpb.GrpcLogEntry_ServerHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_Message:
|
||||
case *binlogpb.GrpcLogEntry_Message:
|
||||
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Log creates a proto binary log entry, and logs it to the sink.
|
||||
func (ml *methodLogger) Log(c LogEntryConfig) {
|
||||
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
|
||||
ml.sink.Write(ml.Build(c))
|
||||
}
|
||||
|
||||
func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
|
||||
if ml.headerMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
@ -118,7 +128,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
// but not counted towards the size limit.
|
||||
continue
|
||||
}
|
||||
currentEntryLen := uint64(len(entry.Value))
|
||||
currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
|
||||
if currentEntryLen > bytesLimit {
|
||||
break
|
||||
}
|
||||
@ -129,7 +139,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
return truncated
|
||||
}
|
||||
|
||||
func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
|
||||
if ml.messageMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
@ -141,8 +151,11 @@ func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||
}
|
||||
|
||||
// LogEntryConfig represents the configuration for binary log entry.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type LogEntryConfig interface {
|
||||
toProto() *pb.GrpcLogEntry
|
||||
toProto() *binlogpb.GrpcLogEntry
|
||||
}
|
||||
|
||||
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
||||
@ -156,10 +169,10 @@ type ClientHeader struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
|
||||
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
||||
// function will set the fields when necessary.
|
||||
clientHeader := &pb.ClientHeader{
|
||||
clientHeader := &binlogpb.ClientHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
MethodName: c.MethodName,
|
||||
Authority: c.Authority,
|
||||
@ -167,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||
if c.Timeout > 0 {
|
||||
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Payload: &binlogpb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: clientHeader,
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -192,19 +205,19 @@ type ServerHeader struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &pb.ServerHeader{
|
||||
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Payload: &binlogpb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &binlogpb.ServerHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -217,10 +230,10 @@ type ClientMessage struct {
|
||||
OnClientSide bool
|
||||
// Message can be a proto.Message or []byte. Other messages formats are not
|
||||
// supported.
|
||||
Message interface{}
|
||||
Message any
|
||||
}
|
||||
|
||||
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
@ -235,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Payload: &binlogpb.GrpcLogEntry_Message{
|
||||
Message: &binlogpb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -257,10 +270,10 @@ type ServerMessage struct {
|
||||
OnClientSide bool
|
||||
// Message can be a proto.Message or []byte. Other messages formats are not
|
||||
// supported.
|
||||
Message interface{}
|
||||
Message any
|
||||
}
|
||||
|
||||
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
@ -275,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Payload: &binlogpb.GrpcLogEntry_Message{
|
||||
Message: &binlogpb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -297,15 +310,15 @@ type ClientHalfClose struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
Payload: nil, // No payload here.
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -321,7 +334,7 @@ type ServerTrailer struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
|
||||
st, ok := status.FromError(c.Err)
|
||||
if !ok {
|
||||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||||
@ -337,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
}
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Payload: &pb.GrpcLogEntry_Trailer{
|
||||
Trailer: &pb.Trailer{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Payload: &binlogpb.GrpcLogEntry_Trailer{
|
||||
Trailer: &binlogpb.Trailer{
|
||||
Metadata: mdToMetadataProto(c.Trailer),
|
||||
StatusCode: uint32(st.Code()),
|
||||
StatusMessage: st.Message(),
|
||||
@ -349,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -364,15 +377,15 @@ type Cancel struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
Payload: nil,
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -389,15 +402,15 @@ func metadataKeyOmit(key string) bool {
|
||||
return strings.HasPrefix(key, "grpc-")
|
||||
}
|
||||
|
||||
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||
ret := &pb.Metadata{}
|
||||
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
|
||||
ret := &binlogpb.Metadata{}
|
||||
for k, vv := range md {
|
||||
if metadataKeyOmit(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
ret.Entry = append(ret.Entry,
|
||||
&pb.MetadataEntry{
|
||||
&binlogpb.MetadataEntry{
|
||||
Key: k,
|
||||
Value: []byte(v),
|
||||
},
|
||||
@ -407,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||
return ret
|
||||
}
|
||||
|
||||
func addrToProto(addr net.Addr) *pb.Address {
|
||||
ret := &pb.Address{}
|
||||
func addrToProto(addr net.Addr) *binlogpb.Address {
|
||||
ret := &binlogpb.Address{}
|
||||
switch a := addr.(type) {
|
||||
case *net.TCPAddr:
|
||||
if a.IP.To4() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV4
|
||||
ret.Type = binlogpb.Address_TYPE_IPV4
|
||||
} else if a.IP.To16() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV6
|
||||
ret.Type = binlogpb.Address_TYPE_IPV6
|
||||
} else {
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||||
// Do not set address and port fields.
|
||||
break
|
||||
}
|
||||
ret.Address = a.IP.String()
|
||||
ret.IpPort = uint32(a.Port)
|
||||
case *net.UnixAddr:
|
||||
ret.Type = pb.Address_TYPE_UNIX
|
||||
ret.Type = binlogpb.Address_TYPE_UNIX
|
||||
ret.Address = a.String()
|
||||
default:
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
12
src/runtime/vendor/google.golang.org/grpc/internal/binarylog/sink.go
generated
vendored
@ -26,7 +26,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -42,15 +42,15 @@ type Sink interface {
|
||||
// Write will be called to write the log entry into the sink.
|
||||
//
|
||||
// It should be thread-safe so it can be called in parallel.
|
||||
Write(*pb.GrpcLogEntry) error
|
||||
Write(*binlogpb.GrpcLogEntry) error
|
||||
// Close will be called when the Sink is replaced by a new Sink.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type noopSink struct{}
|
||||
|
||||
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
|
||||
func (ns *noopSink) Close() error { return nil }
|
||||
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
|
||||
func (ns *noopSink) Close() error { return nil }
|
||||
|
||||
// newWriterSink creates a binary log sink with the given writer.
|
||||
//
|
||||
@ -66,7 +66,7 @@ type writerSink struct {
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
||||
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||||
b, err := proto.Marshal(e)
|
||||
if err != nil {
|
||||
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
|
||||
@ -96,7 +96,7 @@ type bufferedSink struct {
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
|
||||
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if !fs.flusherStarted {
|
||||
|
44
src/runtime/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
@ -28,35 +28,38 @@ import "sync"
|
||||
// the underlying mutex used for synchronization.
|
||||
//
|
||||
// Unbounded supports values of any type to be stored in it by using a channel
|
||||
// of `interface{}`. This means that a call to Put() incurs an extra memory
|
||||
// allocation, and also that users need a type assertion while reading. For
|
||||
// performance critical code paths, using Unbounded is strongly discouraged and
|
||||
// defining a new type specific implementation of this buffer is preferred. See
|
||||
// of `any`. This means that a call to Put() incurs an extra memory allocation,
|
||||
// and also that users need a type assertion while reading. For performance
|
||||
// critical code paths, using Unbounded is strongly discouraged and defining a
|
||||
// new type specific implementation of this buffer is preferred. See
|
||||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
c chan any
|
||||
closed bool
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
backlog []any
|
||||
}
|
||||
|
||||
// NewUnbounded returns a new instance of Unbounded.
|
||||
func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan interface{}, 1)}
|
||||
return &Unbounded{c: make(chan any, 1)}
|
||||
}
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
func (b *Unbounded) Put(t any) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
|
||||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
//
|
||||
// If the unbounded buffer is closed, the read channel returned by this method
|
||||
// is closed.
|
||||
func (b *Unbounded) Get() <-chan any {
|
||||
return b.c
|
||||
}
|
||||
|
||||
// Close closes the unbounded buffer.
|
||||
func (b *Unbounded) Close() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
b.closed = true
|
||||
close(b.c)
|
||||
}
|
||||
|
69
src/runtime/vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
@ -24,9 +24,7 @@
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@ -40,8 +38,11 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// IDGen is the global channelz entity ID generator. It should not be used
|
||||
// outside this package except by tests.
|
||||
IDGen IDGenerator
|
||||
|
||||
db dbWrapper
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = int64(50)
|
||||
curState int32
|
||||
@ -52,14 +53,14 @@ var (
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
IDGen.Reset()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// IsOn returns whether channelz data collection is on.
|
||||
func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
return atomic.LoadInt32(&curState) == 1
|
||||
}
|
||||
|
||||
// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
|
||||
@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap {
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorageForTesting initializes channelz data storage and id
|
||||
// generator for testing purposes.
|
||||
//
|
||||
// Returns a cleanup function to be invoked by the test, which waits for up to
|
||||
// 10s for all channelz state to be reset by the grpc goroutines when those
|
||||
// entities get closed. This cleanup function helps with ensuring that tests
|
||||
// don't mess up each other.
|
||||
func NewChannelzStorageForTesting() (cleanup func() error) {
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
|
||||
return func() error {
|
||||
cm := db.get()
|
||||
if cm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
cm.mu.RLock()
|
||||
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
|
||||
cm.mu.RUnlock()
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
|
||||
}
|
||||
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
|
||||
return nil
|
||||
}
|
||||
<-ticker.C
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||
// boolean indicating whether there's more top channels to be queried for.
|
||||
//
|
||||
@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric {
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
id := IDGen.genID()
|
||||
var parent int64
|
||||
isTopChannel := true
|
||||
if pid != nil {
|
||||
@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
|
||||
if pid == nil {
|
||||
return nil, errors.New("a SubChannel's parent id cannot be nil")
|
||||
}
|
||||
id := idGen.genID()
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
}
|
||||
@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterServer(s Server, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
}
|
||||
@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e
|
||||
if pid == nil {
|
||||
return nil, errors.New("a ListenSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e
|
||||
if pid == nil {
|
||||
return nil, errors.New("a NormalSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
|
||||
@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric {
|
||||
return sm
|
||||
}
|
||||
|
||||
type idGenerator struct {
|
||||
// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
|
||||
type IDGenerator struct {
|
||||
id int64
|
||||
}
|
||||
|
||||
func (i *idGenerator) reset() {
|
||||
// Reset resets the generated ID back to zero. Should only be used at
|
||||
// initialization or by tests sensitive to the ID number.
|
||||
func (i *IDGenerator) Reset() {
|
||||
atomic.StoreInt64(&i.id, 0)
|
||||
}
|
||||
|
||||
func (i *idGenerator) genID() int64 {
|
||||
func (i *IDGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
||||
|
12
src/runtime/vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
@ -31,7 +31,7 @@ func withParens(id *Identifier) string {
|
||||
}
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtInfo,
|
||||
@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtWarning,
|
||||
@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtError,
|
||||
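A pattern that repeats throughout this vendor bump is the switch of variadic parameters from interface{} to any. Since Go 1.18, any is a predeclared alias for interface{}, so the change is purely cosmetic and source compatible; a small runnable sketch (names are illustrative only):

package main

import "fmt"

// logf accepts arbitrary values, exactly as it would with ...interface{}.
func logf(format string, args ...any) string {
	return fmt.Sprintf(format, args...)
}

func main() {
	var x interface{} = 42
	var y any = 42 // identical type: any is an alias for interface{}
	fmt.Println(logf("%v %v", x, y))
}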
21 src/runtime/vendor/google.golang.org/grpc/internal/channelz/types.go generated vendored
@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
@ -628,6 +628,7 @@ type tracedChannel interface {
|
||||
|
||||
type channelTrace struct {
|
||||
cm *channelMap
|
||||
clearCalled bool
|
||||
createdTime time.Time
|
||||
eventCount int64
|
||||
mu sync.Mutex
|
||||
@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) {
|
||||
}
|
||||
|
||||
func (c *channelTrace) clear() {
|
||||
if c.clearCalled {
|
||||
return
|
||||
}
|
||||
c.clearCalled = true
|
||||
c.mu.Lock()
|
||||
for _, e := range c.events {
|
||||
if e.RefID != 0 {
|
||||
2 src/runtime/vendor/google.golang.org/grpc/internal/channelz/util_linux.go generated vendored
@ -23,7 +23,7 @@ import (
)

// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(socket interface{}) *SocketOptionData {
func GetSocketOption(socket any) *SocketOptionData {
c, ok := socket.(syscall.Conn)
if !ok {
return nil
2 src/runtime/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go generated vendored
@ -22,6 +22,6 @@
package channelz

// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(c interface{}) *SocketOptionData {
func GetSocketOption(c any) *SocketOptionData {
return nil
}
8 src/runtime/vendor/google.golang.org/grpc/internal/credentials/credentials.go generated vendored
@ -25,12 +25,12 @@ import (
type requestInfoKey struct{}

// NewRequestInfoContext creates a context with ri.
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri)
}

// RequestInfoFromContext extracts the RequestInfo from ctx.
func RequestInfoFromContext(ctx context.Context) interface{} {
func RequestInfoFromContext(ctx context.Context) any {
return ctx.Value(requestInfoKey{})
}

@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} {
type clientHandshakeInfoKey struct{}

// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
func ClientHandshakeInfoFromContext(ctx context.Context) any {
return ctx.Value(clientHandshakeInfoKey{})
}

// NewClientHandshakeInfoContext creates a context with chi.
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
}
49 src/runtime/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go generated vendored
@ -21,15 +21,52 @@ package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||||
)
|
||||
|
||||
var (
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||||
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
|
||||
// AdvertiseCompressors is set if registered compressor should be advertised
|
||||
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
|
||||
AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
|
||||
// RingHashCap indicates the maximum ring size which defaults to 4096
|
||||
// entries but may be overridden by setting the environment variable
|
||||
// "GRPC_RING_HASH_CAP". This does not override the default bounds
|
||||
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
||||
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
||||
// PickFirstLBConfig is set if we should support configuration of the
|
||||
// pick_first LB policy.
|
||||
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
|
||||
// LeastRequestLB is set if we should support the least_request_experimental
|
||||
// LB policy, which can be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
|
||||
LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
|
||||
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
|
||||
// handshakes that can be performed.
|
||||
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
|
||||
)
|
||||
|
||||
func boolFromEnv(envVar string, def bool) bool {
|
||||
if def {
|
||||
// The default is true; return true unless the variable is "false".
|
||||
return !strings.EqualFold(os.Getenv(envVar), "false")
|
||||
}
|
||||
// The default is false; return false unless the variable is "true".
|
||||
return strings.EqualFold(os.Getenv(envVar), "true")
|
||||
}
|
||||
|
||||
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
|
||||
v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
|
||||
if err != nil {
|
||||
return def
|
||||
}
|
||||
if v < min {
|
||||
return min
|
||||
}
|
||||
if v > max {
|
||||
return max
|
||||
}
|
||||
return v
|
||||
}
|
||||
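The new boolFromEnv/uint64FromEnv helpers above replace the strings.EqualFold checks that were previously inlined at each variable. They are unexported inside the vendored envconfig package, so the snippet below is only a self-contained mirror of the same default/clamping behavior, with an illustrative usage in main (the env var names are taken from the diff):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// boolFromEnv returns def unless the variable explicitly holds the opposite
// value: "false" when the default is true, "true" when the default is false.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

// uint64FromEnv parses the variable and clamps the result to [min, max],
// falling back to def when the variable is unset or unparsable.
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("GRPC_RING_HASH_CAP", "10000000") // above the 8M upper bound
	fmt.Println(boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true))            // true: unset keeps the default
	fmt.Println(uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)) // clamped to 8388608
}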
42 src/runtime/vendor/google.golang.org/grpc/internal/envconfig/observability.go generated vendored Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import "os"
|
||||
|
||||
const (
|
||||
envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
|
||||
envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
|
||||
)
|
||||
|
||||
var (
|
||||
// ObservabilityConfig is the json configuration for the gcp/observability
|
||||
// package specified directly in the envObservabilityConfig env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfig = os.Getenv(envObservabilityConfig)
|
||||
// ObservabilityConfigFile is the json configuration for the
|
||||
// gcp/observability specified in a file with the location specified in
|
||||
// envObservabilityConfigFile env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
|
||||
)
52 src/runtime/vendor/google.golang.org/grpc/internal/envconfig/xds.go generated vendored
@ -20,7 +20,6 @@ package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -36,16 +35,6 @@ const (
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
||||
|
||||
ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
|
||||
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
|
||||
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||||
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
|
||||
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
|
||||
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
|
||||
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
|
||||
|
||||
c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -64,38 +53,43 @@ var (
|
||||
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
||||
// disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
||||
XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
|
||||
XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
|
||||
// XDSClientSideSecurity is used to control processing of security
|
||||
// configuration on the client-side.
|
||||
//
|
||||
// Note that there is no env var protection for the server-side because we
|
||||
// have a brand new API on the server-side and users explicitly need to use
|
||||
// the new API to get security integration on the server.
|
||||
XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
||||
// and DNS cluster is enabled, which can be enabled by setting the
|
||||
// environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
||||
// "true".
|
||||
XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
|
||||
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
|
||||
// DNS cluster is enabled, which can be disabled by setting the environment
|
||||
// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||||
// to "false".
|
||||
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
||||
|
||||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||||
// which can be disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
||||
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
|
||||
XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
|
||||
// XDSOutlierDetection indicates whether outlier detection support is
|
||||
// enabled, which can be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
|
||||
XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
|
||||
// XDSFederation indicates whether federation support is enabled.
|
||||
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
|
||||
// enabled, which can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
|
||||
XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
|
||||
// XDSFederation indicates whether federation support is enabled, which can
|
||||
// be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
||||
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
|
||||
|
||||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||||
// support for the RLS CLuster Specifier is enabled, which can be enabled by
|
||||
// support for the RLS CLuster Specifier is enabled, which can be disabled by
|
||||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||||
// "true".
|
||||
XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
|
||||
// "false".
|
||||
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
|
||||
|
||||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
||||
// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
|
||||
// can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
|
||||
XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
|
||||
)
|
||||
42 src/runtime/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go generated vendored
@ -30,7 +30,7 @@ var Logger LoggerV2
|
||||
var DepthLogger DepthLoggerV2
|
||||
|
||||
// InfoDepth logs to the INFO log at the specified depth.
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
func InfoDepth(depth int, args ...any) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.InfoDepth(depth, args...)
|
||||
} else {
|
||||
@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) {
|
||||
}
|
||||
|
||||
// WarningDepth logs to the WARNING log at the specified depth.
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
func WarningDepth(depth int, args ...any) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.WarningDepth(depth, args...)
|
||||
} else {
|
||||
@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) {
|
||||
}
|
||||
|
||||
// ErrorDepth logs to the ERROR log at the specified depth.
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
func ErrorDepth(depth int, args ...any) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.ErrorDepth(depth, args...)
|
||||
} else {
|
||||
@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) {
|
||||
}
|
||||
|
||||
// FatalDepth logs to the FATAL log at the specified depth.
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
func FatalDepth(depth int, args ...any) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.FatalDepth(depth, args...)
|
||||
} else {
|
||||
@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) {
|
||||
// is defined here to avoid a circular dependency.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
Info(args ...any)
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
Infoln(args ...any)
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
Infof(format string, args ...any)
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
Warning(args ...any)
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
Warningln(args ...any)
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
Warningf(format string, args ...any)
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
Error(args ...any)
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
Errorln(args ...any)
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
Errorf(format string, args ...any)
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
Fatal(args ...any)
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
Fatalln(args ...any)
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
Fatalf(format string, args ...any)
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
@ -110,17 +110,17 @@ type LoggerV2 interface {
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
InfoDepth(depth int, args ...any)
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
WarningDepth(depth int, args ...any)
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
ErrorDepth(depth int, args ...any)
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
FatalDepth(depth int, args ...any)
|
||||
}
|
||||
20 src/runtime/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go generated vendored
@ -31,7 +31,7 @@ type PrefixLogger struct {
|
||||
}
|
||||
|
||||
// Infof does info logging.
|
||||
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
func (pl *PrefixLogger) Infof(format string, args ...any) {
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Warningf does warning logging.
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...any) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Errorf does error logging.
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...any) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
@ -62,7 +62,10 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...any) {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func (pl *PrefixLogger) V(l int) bool {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
return Logger.V(l)
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
28 src/runtime/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go generated vendored
@ -52,6 +52,13 @@ func Intn(n int) int {
|
||||
return r.Intn(n)
|
||||
}
|
||||
|
||||
// Int31n implements rand.Int31n on the grpcrand global source.
|
||||
func Int31n(n int32) int32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int31n(n)
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
@ -65,3 +72,24 @@ func Uint64() uint64 {
|
||||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
||||
|
||||
// Uint32 implements rand.Uint32 on the grpcrand global source.
|
||||
func Uint32() uint32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint32()
|
||||
}
|
||||
|
||||
// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
|
||||
func ExpFloat64() float64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.ExpFloat64()
|
||||
}
|
||||
|
||||
// Shuffle implements rand.Shuffle on the grpcrand global source.
|
||||
var Shuffle = func(n int, f func(int, int)) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
r.Shuffle(n, f)
|
||||
}
|
||||
125 src/runtime/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go generated vendored Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
)
|
||||
|
||||
// CallbackSerializer provides a mechanism to schedule callbacks in a
|
||||
// synchronized manner. It provides a FIFO guarantee on the order of execution
|
||||
// of scheduled callbacks. New callbacks can be scheduled by invoking the
|
||||
// Schedule() method.
|
||||
//
|
||||
// This type is safe for concurrent access.
|
||||
type CallbackSerializer struct {
|
||||
// done is closed once the serializer is shut down completely, i.e all
|
||||
// scheduled callbacks are executed and the serializer has deallocated all
|
||||
// its resources.
|
||||
done chan struct{}
|
||||
|
||||
callbacks *buffer.Unbounded
|
||||
closedMu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
|
||||
// context will be passed to the scheduled callbacks. Users should cancel the
|
||||
// provided context to shutdown the CallbackSerializer. It is guaranteed that no
|
||||
// callbacks will be added once this context is canceled, and any pending un-run
|
||||
// callbacks will be executed before the serializer is shut down.
|
||||
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
|
||||
cs := &CallbackSerializer{
|
||||
done: make(chan struct{}),
|
||||
callbacks: buffer.NewUnbounded(),
|
||||
}
|
||||
go cs.run(ctx)
|
||||
return cs
|
||||
}
|
||||
|
||||
// Schedule adds a callback to be scheduled after existing callbacks are run.
|
||||
//
|
||||
// Callbacks are expected to honor the context when performing any blocking
|
||||
// operations, and should return early when the context is canceled.
|
||||
//
|
||||
// Return value indicates if the callback was successfully added to the list of
|
||||
// callbacks to be executed by the serializer. It is not possible to add
|
||||
// callbacks once the context passed to NewCallbackSerializer is cancelled.
|
||||
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
|
||||
cs.closedMu.Lock()
|
||||
defer cs.closedMu.Unlock()
|
||||
|
||||
if cs.closed {
|
||||
return false
|
||||
}
|
||||
cs.callbacks.Put(f)
|
||||
return true
|
||||
}
|
||||
|
||||
func (cs *CallbackSerializer) run(ctx context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
|
||||
defer close(cs.done)
|
||||
for ctx.Err() == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do nothing here. Next iteration of the for loop will not happen,
|
||||
// since ctx.Err() would be non-nil.
|
||||
case callback, ok := <-cs.callbacks.Get():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
cs.callbacks.Load()
|
||||
callback.(func(ctx context.Context))(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch pending callbacks if any, and execute them before returning from
|
||||
// this method and closing cs.done.
|
||||
cs.closedMu.Lock()
|
||||
cs.closed = true
|
||||
backlog = cs.fetchPendingCallbacks()
|
||||
cs.callbacks.Close()
|
||||
cs.closedMu.Unlock()
|
||||
for _, b := range backlog {
|
||||
b(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
for {
|
||||
select {
|
||||
case b := <-cs.callbacks.Get():
|
||||
backlog = append(backlog, b.(func(context.Context)))
|
||||
cs.callbacks.Load()
|
||||
default:
|
||||
return backlog
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Done returns a channel that is closed after the context passed to
|
||||
// NewCallbackSerializer is canceled and all callbacks have been executed.
|
||||
func (cs *CallbackSerializer) Done() <-chan struct{} {
|
||||
return cs.done
|
||||
}
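CallbackSerializer, added above, runs scheduled callbacks one at a time in FIFO order until its context is canceled, then drains any pending callbacks before closing Done(). The package is internal to gRPC, so external code cannot actually import it; the sketch below is a hypothetical usage built only from the API shown in the diff (NewCallbackSerializer, Schedule, Done) and is meant purely to illustrate the semantics:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // internal package: illustration only, will not compile outside the grpc module
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run sequentially, in the order they were scheduled.
	for i := 0; i < 3; i++ {
		i := i
		cs.Schedule(func(context.Context) { fmt.Println("callback", i) })
	}

	// Canceling the context stops new scheduling; already-queued callbacks
	// still run before the serializer shuts down and Done() is closed.
	cancel()
	<-cs.Done()
}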
32 src/runtime/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go generated vendored Normal file
@ -0,0 +1,32 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// OnceFunc returns a function wrapping f which ensures f is only executed
|
||||
// once even if the returned function is executed multiple times.
|
||||
func OnceFunc(f func()) func() {
|
||||
var once sync.Once
|
||||
return func() {
|
||||
once.Do(f)
|
||||
}
|
||||
}
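grpcsync.OnceFunc wraps a function so that repeated invocations execute it at most once, essentially the same idea as sync.OnceFunc that later landed in the standard library. A runnable sketch of the pattern (the names below are illustrative, not the vendored API):

package main

import (
	"fmt"
	"sync"
)

// onceFunc mirrors the vendored grpcsync.OnceFunc: the returned closure runs
// f only on its first call; later calls are no-ops.
func onceFunc(f func()) func() {
	var once sync.Once
	return func() { once.Do(f) }
}

func main() {
	closeConn := onceFunc(func() { fmt.Println("connection closed") })
	closeConn() // prints once
	closeConn() // no-op on the second call
}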
121 src/runtime/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go generated vendored Normal file
@ -0,0 +1,121 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Subscriber represents an entity that is subscribed to messages published on
|
||||
// a PubSub. It wraps the callback to be invoked by the PubSub when a new
|
||||
// message is published.
|
||||
type Subscriber interface {
|
||||
// OnMessage is invoked when a new message is published. Implementations
|
||||
// must not block in this method.
|
||||
OnMessage(msg any)
|
||||
}
|
||||
|
||||
// PubSub is a simple one-to-many publish-subscribe system that supports
|
||||
// messages of arbitrary type. It guarantees that messages are delivered in
|
||||
// the same order in which they were published.
|
||||
//
|
||||
// Publisher invokes the Publish() method to publish new messages, while
|
||||
// subscribers interested in receiving these messages register a callback
|
||||
// via the Subscribe() method.
|
||||
//
|
||||
// Once a PubSub is stopped, no more messages can be published, but any pending
|
||||
// published messages will be delivered to the subscribers. Done may be used
|
||||
// to determine when all published messages have been delivered.
|
||||
type PubSub struct {
|
||||
cs *CallbackSerializer
|
||||
|
||||
// Access to the below fields are guarded by this mutex.
|
||||
mu sync.Mutex
|
||||
msg any
|
||||
subscribers map[Subscriber]bool
|
||||
}
|
||||
|
||||
// NewPubSub returns a new PubSub instance. Users should cancel the
|
||||
// provided context to shutdown the PubSub.
|
||||
func NewPubSub(ctx context.Context) *PubSub {
|
||||
return &PubSub{
|
||||
cs: NewCallbackSerializer(ctx),
|
||||
subscribers: map[Subscriber]bool{},
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe registers the provided Subscriber to the PubSub.
|
||||
//
|
||||
// If the PubSub contains a previously published message, the Subscriber's
|
||||
// OnMessage() callback will be invoked asynchronously with the existing
|
||||
// message to begin with, and subsequently for every newly published message.
|
||||
//
|
||||
// The caller is responsible for invoking the returned cancel function to
|
||||
// unsubscribe itself from the PubSub.
|
||||
func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
ps.subscribers[sub] = true
|
||||
|
||||
if ps.msg != nil {
|
||||
msg := ps.msg
|
||||
ps.cs.Schedule(func(context.Context) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
if !ps.subscribers[sub] {
|
||||
return
|
||||
}
|
||||
sub.OnMessage(msg)
|
||||
})
|
||||
}
|
||||
|
||||
return func() {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
delete(ps.subscribers, sub)
|
||||
}
|
||||
}
|
||||
|
||||
// Publish publishes the provided message to the PubSub, and invokes
|
||||
// callbacks registered by subscribers asynchronously.
|
||||
func (ps *PubSub) Publish(msg any) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
ps.msg = msg
|
||||
for sub := range ps.subscribers {
|
||||
s := sub
|
||||
ps.cs.Schedule(func(context.Context) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
if !ps.subscribers[s] {
|
||||
return
|
||||
}
|
||||
s.OnMessage(msg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Done returns a channel that is closed after the context passed to NewPubSub
|
||||
// is canceled and all updates have been sent to subscribers.
|
||||
func (ps *PubSub) Done() <-chan struct{} {
|
||||
return ps.cs.Done()
|
||||
}
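PubSub, also new in this bump, delivers every published message in order to all registered Subscribers and replays the most recent message to late subscribers. As with CallbackSerializer, the package is internal, so this is a hypothetical usage sketch assembled only from the API in the diff (NewPubSub, Subscribe, Publish, Done, and the Subscriber interface):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // internal package: illustration only
)

// stateWatcher implements grpcsync.Subscriber; OnMessage must not block.
type stateWatcher struct{}

func (stateWatcher) OnMessage(msg any) { fmt.Println("state:", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	unsubscribe := ps.Subscribe(stateWatcher{})
	defer unsubscribe()

	ps.Publish("READY") // delivered asynchronously, in publish order
	ps.Publish("IDLE")

	cancel()    // stop the PubSub; pending deliveries still go out
	<-ps.Done() // closed once all updates have been sent to subscribers
}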
47 src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go generated vendored Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
)
|
||||
|
||||
// RegisteredCompressorNames holds names of the registered compressors.
|
||||
var RegisteredCompressorNames []string
|
||||
|
||||
// IsCompressorNameRegistered returns true when name is available in registry.
|
||||
func IsCompressorNameRegistered(name string) bool {
|
||||
for _, compressor := range RegisteredCompressorNames {
|
||||
if compressor == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RegisteredCompressors returns a string of registered compressor names
|
||||
// separated by comma.
|
||||
func RegisteredCompressors() string {
|
||||
if !envconfig.AdvertiseCompressors {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(RegisteredCompressorNames, ",")
|
||||
}
6 src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/method.go generated vendored
@ -25,7 +25,6 @@ import (

// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")
@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) {
return methodName[:pos], methodName[pos+1:], nil
}

// baseContentType is the base content-type for gRPC. This is a valid
// content-type on it's own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
const baseContentType = "application/grpc"

// ContentSubtype returns the content-subtype for the given content-type. The
301 src/runtime/vendor/google.golang.org/grpc/internal/idle/idle.go generated vendored Normal file
@ -0,0 +1,301 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package idle contains a component for managing idleness (entering and exiting)
|
||||
// based on RPC activity.
|
||||
package idle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// For overriding in unit tests.
|
||||
var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
|
||||
return time.AfterFunc(d, f)
|
||||
}
|
||||
|
||||
// Enforcer is the functionality provided by grpc.ClientConn to enter
|
||||
// and exit from idle mode.
|
||||
type Enforcer interface {
|
||||
ExitIdleMode() error
|
||||
EnterIdleMode() error
|
||||
}
|
||||
|
||||
// Manager defines the functionality required to track RPC activity on a
|
||||
// channel.
|
||||
type Manager interface {
|
||||
OnCallBegin() error
|
||||
OnCallEnd()
|
||||
Close()
|
||||
}
|
||||
|
||||
type noopManager struct{}
|
||||
|
||||
func (noopManager) OnCallBegin() error { return nil }
|
||||
func (noopManager) OnCallEnd() {}
|
||||
func (noopManager) Close() {}
|
||||
|
||||
// manager implements the Manager interface. It uses atomic operations to
|
||||
// synchronize access to shared state and a mutex to guarantee mutual exclusion
|
||||
// in a critical section.
|
||||
type manager struct {
|
||||
// State accessed atomically.
|
||||
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
|
||||
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
|
||||
activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
|
||||
closed int32 // Boolean; True when the manager is closed.
|
||||
|
||||
// Can be accessed without atomics or mutex since these are set at creation
|
||||
// time and read-only after that.
|
||||
enforcer Enforcer // Functionality provided by grpc.ClientConn.
|
||||
timeout int64 // Idle timeout duration nanos stored as an int64.
|
||||
logger grpclog.LoggerV2
|
||||
|
||||
// idleMu is used to guarantee mutual exclusion in two scenarios:
|
||||
// - Opposing intentions:
|
||||
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put
|
||||
// the channel in idle mode because the channel has been inactive.
|
||||
// - b: At the same time an RPC is made on the channel, and OnCallBegin()
|
||||
// is trying to prevent the channel from going idle.
|
||||
// - Competing intentions:
|
||||
// - The channel is in idle mode and there are multiple RPCs starting at
|
||||
// the same time, all trying to move the channel out of idle. Only one
|
||||
// of them should succeed in doing so, while the other RPCs should
|
||||
// piggyback on the first one and be successfully handled.
|
||||
idleMu sync.RWMutex
|
||||
actuallyIdle bool
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// ManagerOptions is a collection of options used by
|
||||
// NewManager.
|
||||
type ManagerOptions struct {
|
||||
Enforcer Enforcer
|
||||
Timeout time.Duration
|
||||
Logger grpclog.LoggerV2
|
||||
}
|
||||
|
||||
// NewManager creates a new idleness manager implementation for the
|
||||
// given idle timeout.
|
||||
func NewManager(opts ManagerOptions) Manager {
|
||||
if opts.Timeout == 0 {
|
||||
return noopManager{}
|
||||
}
|
||||
|
||||
m := &manager{
|
||||
enforcer: opts.Enforcer,
|
||||
timeout: int64(opts.Timeout),
|
||||
logger: opts.Logger,
|
||||
}
|
||||
m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
|
||||
return m
|
||||
}
|
||||
|
||||
// resetIdleTimer resets the idle timer to the given duration. This method
|
||||
// should only be called from the timer callback.
|
||||
func (m *manager) resetIdleTimer(d time.Duration) {
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if m.timer == nil {
|
||||
// Only close sets timer to nil. We are done.
|
||||
return
|
||||
}
|
||||
|
||||
// It is safe to ignore the return value from Reset() because this method is
|
||||
// only ever called from the timer callback, which means the timer has
|
||||
// already fired.
|
||||
m.timer.Reset(d)
|
||||
}
|
||||
|
||||
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
||||
// configured idle timeout. The channel is considered inactive if there are no
|
||||
// ongoing calls and no RPC activity since the last time the timer fired.
|
||||
func (m *manager) handleIdleTimeout() {
|
||||
if m.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
return
|
||||
}
|
||||
|
||||
// There has been activity on the channel since we last got here. Reset the
|
||||
// timer and return.
|
||||
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
|
||||
// Set the timer to fire after a duration of idle timeout, calculated
|
||||
// from the time the most recent RPC completed.
|
||||
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
|
||||
m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
|
||||
return
|
||||
}
|
||||
|
||||
// This CAS operation is extremely likely to succeed given that there has
|
||||
// been no activity since the last time we were here. Setting the
|
||||
// activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
|
||||
// channel is either in idle mode or is trying to get there.
|
||||
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
|
||||
// This CAS operation can fail if an RPC started after we checked for
|
||||
// activity at the top of this method, or one was ongoing from before
|
||||
// the last time we were here. In both case, reset the timer and return.
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
return
|
||||
}
|
||||
|
||||
// Now that we've set the active calls count to -math.MaxInt32, it's time to
|
||||
// actually move to idle mode.
|
||||
if m.tryEnterIdleMode() {
|
||||
// Successfully entered idle mode. No timer needed until we exit idle.
|
||||
return
|
||||
}
|
||||
|
||||
// Failed to enter idle mode due to a concurrent RPC that kept the channel
|
||||
// active, or because of an error from the channel. Undo the attempt to
|
||||
// enter idle, and reset the timer to try again later.
|
||||
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
}
|
||||
|
||||
// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
||||
// that, it performs a last minute check to ensure that no new RPC has come in,
|
||||
// making the channel active.
|
||||
//
|
||||
// Return value indicates whether or not the channel moved to idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
||||
func (m *manager) tryEnterIdleMode() bool {
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
|
||||
// We raced and lost to a new RPC. Very rare, but stop entering idle.
|
||||
return false
|
||||
}
|
||||
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
|
||||
// An very short RPC could have come in (and also finished) after we
|
||||
// checked for calls count and activity in handleIdleTimeout(), but
|
||||
// before the CAS operation. So, we need to check for activity again.
|
||||
return false
|
||||
}
|
||||
|
||||
// No new RPCs have come in since we last set the active calls count value
|
||||
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
||||
// safe to enter idle mode now.
|
||||
if err := m.enforcer.EnterIdleMode(); err != nil {
|
||||
m.logger.Errorf("Failed to enter idle mode: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Successfully entered idle mode.
|
||||
m.actuallyIdle = true
|
||||
return true
|
||||
}
|
||||
|
||||
// OnCallBegin is invoked at the start of every RPC.
|
||||
func (m *manager) OnCallBegin() error {
|
||||
if m.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
|
||||
// Channel is not idle now. Set the activity bit and allow the call.
|
||||
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Channel is either in idle mode or is in the process of moving to idle
|
||||
// mode. Attempt to exit idle mode to allow this RPC.
|
||||
if err := m.exitIdleMode(); err != nil {
|
||||
// Undo the increment to calls count, and return an error causing the
|
||||
// RPC to fail.
|
||||
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// exitIdleMode instructs the channel to exit idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
||||
func (m *manager) exitIdleMode() error {
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if !m.actuallyIdle {
|
||||
// This can happen in two scenarios:
|
||||
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
||||
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
||||
// came in and OnCallBegin() noticed that the calls count is negative.
|
||||
// - Channel is in idle mode, and multiple new RPCs come in at the same
|
||||
// time, all of them notice a negative calls count in OnCallBegin and get
|
||||
// here. The first one to get the lock would got the channel to exit idle.
|
||||
//
|
||||
// Either way, nothing to do here.
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := m.enforcer.ExitIdleMode(); err != nil {
|
||||
return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
||||
}
|
||||
|
||||
// Undo the idle entry process. This also respects any new RPC attempts.
|
||||
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||
m.actuallyIdle = false
|
||||
|
||||
// Start a new timer to fire after the configured idle timeout.
|
||||
m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnCallEnd is invoked at the end of every RPC.
|
||||
func (m *manager) OnCallEnd() {
|
||||
if m.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
// Record the time at which the most recent call finished.
|
||||
atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
|
||||
|
||||
// Decrement the active calls count. This count can temporarily go negative
|
||||
// when the timer callback is in the process of moving the channel to idle
|
||||
// mode, but one or more RPCs come in and complete before the timer callback
|
||||
// can get done with the process of moving to idle mode.
|
||||
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||
}
|
||||
|
||||
func (m *manager) isClosed() bool {
|
||||
return atomic.LoadInt32(&m.closed) == 1
|
||||
}
|
||||
|
||||
func (m *manager) Close() {
|
||||
atomic.StoreInt32(&m.closed, 1)
|
||||
|
||||
m.idleMu.Lock()
|
||||
m.timer.Stop()
|
||||
m.timer = nil
|
||||
m.idleMu.Unlock()
|
||||
}
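The new internal/idle package tracks RPC activity and asks an Enforcer (in practice the grpc.ClientConn) to enter or exit idle mode after a period without calls. It is likewise internal to gRPC, so the wiring sketch below is hypothetical, based only on the exported surface in the diff (Enforcer, ManagerOptions, NewManager, OnCallBegin/OnCallEnd, Close); it also assumes grpclog.Component satisfies the LoggerV2 field:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle" // internal package: illustration only
)

// loggingEnforcer stands in for the ClientConn and just records transitions.
type loggingEnforcer struct{}

func (loggingEnforcer) EnterIdleMode() error { fmt.Println("entering idle"); return nil }
func (loggingEnforcer) ExitIdleMode() error  { fmt.Println("exiting idle"); return nil }

func main() {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: loggingEnforcer{},
		Timeout:  30 * time.Minute, // enter idle after 30 minutes without RPCs
		Logger:   grpclog.Component("idle"),
	})
	defer m.Close()

	// Every RPC brackets its lifetime with OnCallBegin/OnCallEnd, which is
	// what keeps the channel from going idle while calls are in flight.
	if err := m.OnCallBegin(); err == nil {
		// ... issue the RPC ...
		m.OnCallEnd()
	}
}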
126 src/runtime/vendor/google.golang.org/grpc/internal/internal.go generated vendored
@ -30,7 +30,7 @@ import (
|
||||
|
||||
var (
|
||||
// WithHealthCheckFunc is set by dialoptions.go
|
||||
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
|
||||
WithHealthCheckFunc any // func (HealthChecker) DialOption
|
||||
// HealthCheckFunc is used to provide client-side LB channel health checking
|
||||
HealthCheckFunc HealthChecker
|
||||
// BalancerUnregister is exported by package balancer to unregister a balancer.
|
||||
@ -38,8 +38,12 @@ var (
|
||||
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
||||
// default, but tests may wish to set it lower for convenience.
|
||||
KeepaliveMinPingTime = 10 * time.Second
|
||||
// KeepaliveMinServerPingTime is the minimum ping interval for servers.
|
||||
// This must be 1s by default, but tests may wish to set it lower for
|
||||
// convenience.
|
||||
KeepaliveMinServerPingTime = time.Second
|
||||
// ParseServiceConfig parses a JSON representation of the service config.
|
||||
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
|
||||
ParseServiceConfig any // func(string) *serviceconfig.ParseResult
|
||||
// EqualServiceConfigForTesting is for testing service config generation and
|
||||
// parsing. Both a and b should be returned by ParseServiceConfig.
|
||||
// This function compares the config without rawJSON stripped, in case the
|
||||
@ -49,20 +53,128 @@ var (
|
||||
// given name. This is set by package certprovider for use from xDS
|
||||
// bootstrap code while parsing certificate provider configs in the
|
||||
// bootstrap file.
|
||||
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
|
||||
GetCertificateProviderBuilder any // func(string) certprovider.Builder
|
||||
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
|
||||
// stored in the passed in attributes. This is set by
|
||||
// credentials/xds/xds.go.
|
||||
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
|
||||
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
|
||||
// GetServerCredentials returns the transport credentials configured on a
|
||||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||
// is configured on the underlying gRPC server. This is set by server.go.
|
||||
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
||||
GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
|
||||
// CanonicalString returns the canonical string of the code defined here:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
CanonicalString any // func (codes.Code) string
|
||||
// DrainServerTransports initiates a graceful close of existing connections
|
||||
// on a gRPC server accepted on the provided listener address. An
|
||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
// listener moves to "not-serving" mode.
|
||||
DrainServerTransports interface{} // func(*grpc.Server, string)
|
||||
DrainServerTransports any // func(*grpc.Server, string)
|
||||
// AddGlobalServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalServerOptions any // func(opt ...ServerOption)
|
||||
// ClearGlobalServerOptions clears the array of extra ServerOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalServerOptions func()
|
||||
// AddGlobalDialOptions adds an array of DialOption that will be effective
|
||||
// globally for newly created client channels. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalDialOptions any // func(opt ...DialOption)
|
||||
// DisableGlobalDialOptions returns a DialOption that prevents the
|
||||
// ClientConn from applying the global DialOptions (set via
|
||||
// AddGlobalDialOptions).
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
DisableGlobalDialOptions any // func() grpc.DialOption
|
||||
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalDialOptions func()
|
||||
// JoinDialOptions combines the dial options passed as arguments into a
|
||||
// single dial option.
|
||||
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
|
||||
// JoinServerOptions combines the server options passed as arguments into a
|
||||
// single server option.
|
||||
JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
|
||||
|
||||
// WithBinaryLogger returns a DialOption that specifies the binary logger
|
||||
// for a ClientConn.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
|
||||
// BinaryLogger returns a ServerOption that can set the binary logger for a
|
||||
// server.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
|
||||
|
||||
// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
|
||||
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
// the provided xds bootstrap config instead of the global configuration from
|
||||
// the supported environment variables. The resolver.Builder is meant to be
|
||||
// used in conjunction with the grpc.WithResolvers DialOption.
|
||||
//
|
||||
// Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
|
||||
|
||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||
// variable.
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
RegisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
|
||||
// Specifier Plugin for testing purposes. This is needed because there is no way
|
||||
// to unregister the RLS Cluster Specifier Plugin after registering it solely
|
||||
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
UnregisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
|
||||
// purposes, regardless of the RBAC environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
RegisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
|
||||
// testing purposes. This is needed because there is no way to unregister the
|
||||
// HTTP Filter after registering it solely for testing purposes using
|
||||
// RegisterRBACHTTPFilterForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
UnregisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
||||
ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
|
||||
|
||||
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
|
||||
// metadata to RPCs.
|
||||
GRPCResolverSchemeExtraMetadata string = "xds"
|
||||
)
|
||||
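As an illustration of the interface{} -> any hook pattern above (not part of the vendored change), a minimal standalone sketch with a hypothetical hook name; real callers assert the stored value back to the function signature given in its comment:

package main

import "fmt"

// canonicalString stands in for a hook like the ones above (hypothetical name):
// stored as `any`, documented with its intended signature, and set elsewhere at init time.
var canonicalString any // func(code int) string

func main() {
	canonicalString = func(code int) string { return fmt.Sprintf("CODE_%d", code) }

	// Callers assert the hook back to its function type before invoking it;
	// the assertion panics if the stored value has a different signature.
	f := canonicalString.(func(int) string)
	fmt.Println(f(5)) // CODE_5
}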
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
@ -73,7 +185,7 @@ var (
|
||||
//
|
||||
// The health checking protocol is defined at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
|
||||
type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
|
||||
|
||||
const (
|
||||
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
|
||||
64 src/runtime/vendor/google.golang.org/grpc/internal/metadata/metadata.go generated vendored
@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata")
|
||||
|
||||
type mdValue metadata.MD
|
||||
|
||||
func (m mdValue) Equal(o interface{}) bool {
|
||||
func (m mdValue) Equal(o any) bool {
|
||||
om, ok := o.(mdValue)
|
||||
if !ok {
|
||||
return false
|
||||
@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
|
||||
return addr
|
||||
}
|
||||
|
||||
// Validate returns an error if the input md contains invalid keys or values.
|
||||
//
|
||||
// If the header is not a pseudo-header, the following items are checked:
|
||||
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
|
||||
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
|
||||
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
|
||||
// Validate validates every pair in md with ValidatePair.
|
||||
func Validate(md metadata.MD) error {
|
||||
for k, vals := range md {
|
||||
// pseudo-header will be ignored
|
||||
if k[0] == ':' {
|
||||
continue
|
||||
}
|
||||
// check key, for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(k); i++ {
|
||||
r := k[i]
|
||||
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
||||
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(k, "-bin") {
|
||||
continue
|
||||
}
|
||||
// check value
|
||||
for _, val := range vals {
|
||||
if hasNotPrintable(val) {
|
||||
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
|
||||
}
|
||||
if err := ValidatePair(k, vals...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
|
||||
//
|
||||
// - key must contain one or more characters.
|
||||
// - the characters in the key must be contained in [0-9 a-z _ - .].
|
||||
// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
|
||||
// - the characters in the every value must be printable (in [%x20-%x7E]).
|
||||
func ValidatePair(key string, vals ...string) error {
|
||||
// key should not be empty
|
||||
if key == "" {
|
||||
return fmt.Errorf("there is an empty key in the header")
|
||||
}
|
||||
// pseudo-header will be ignored
|
||||
if key[0] == ':' {
|
||||
return nil
|
||||
}
|
||||
// check key, for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(key); i++ {
|
||||
r := key[i]
|
||||
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
||||
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(key, "-bin") {
|
||||
return nil
|
||||
}
|
||||
// check value
|
||||
for _, val := range vals {
|
||||
if hasNotPrintable(val) {
|
||||
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
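As an illustration (not part of the vendored change), a minimal standalone sketch of the header rules that Validate and ValidatePair enforce, assuming the character sets described in the comments above:

package main

import (
	"fmt"
	"strings"
)

// validPair mirrors the rules described above; it is a sketch, not the vendored code.
func validPair(key string, vals ...string) bool {
	if key == "" {
		return false
	}
	if key[0] == ':' { // pseudo-headers are ignored
		return true
	}
	for i := 0; i < len(key); i++ {
		c := key[i]
		if !(c >= 'a' && c <= 'z') && !(c >= '0' && c <= '9') && c != '.' && c != '-' && c != '_' {
			return false
		}
	}
	if strings.HasSuffix(key, "-bin") { // binary values are not validated
		return true
	}
	for _, val := range vals {
		for i := 0; i < len(val); i++ {
			if val[i] < 0x20 || val[i] > 0x7e { // must be printable ASCII
				return false
			}
		}
	}
	return true
}

func main() {
	fmt.Println(validPair("user-agent", "kata-runtime")) // true
	fmt.Println(validPair("Upper-Case", "x"))            // false: uppercase key
	fmt.Println(validPair("trace-bin", "\x00\x01"))      // true: -bin skips value check
	fmt.Println(validPair("authority", "héllo"))         // false: non-ASCII byte in value
}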
2 src/runtime/vendor/google.golang.org/grpc/internal/pretty/pretty.go generated vendored
@ -35,7 +35,7 @@ const jsonIndent = " "
|
||||
// ToJSON marshals the input into a json string.
|
||||
//
|
||||
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
||||
func ToJSON(e interface{}) string {
|
||||
func ToJSON(e any) string {
|
||||
switch ee := e.(type) {
|
||||
case protov1.Message:
|
||||
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
||||
4 src/runtime/vendor/google.golang.org/grpc/internal/resolver/config_selector.go generated vendored
@ -92,7 +92,7 @@ type ClientStream interface {
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines. It is also
|
||||
// not safe to call CloseSend concurrently with SendMsg.
|
||||
SendMsg(m interface{}) error
|
||||
SendMsg(m any) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the stream completes successfully. On
|
||||
// any other error, the stream is aborted and the error contains the RPC
|
||||
@ -101,7 +101,7 @@ type ClientStream interface {
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
RecvMsg(m any) error
|
||||
}
|
||||
|
||||
// ClientInterceptor is an interceptor for gRPC client streams.
|
||||
78 src/runtime/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go generated vendored
@ -62,7 +62,8 @@ const (
|
||||
defaultPort = "443"
|
||||
defaultDNSSvrPort = "53"
|
||||
golang = "GO"
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt
|
||||
// record lookup.
|
||||
txtPrefix = "_grpc_config."
|
||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||
// described in RFC-1464 using the attribute name grpc_config.
|
||||
@ -86,14 +87,14 @@ var (
|
||||
minDNSResRate = 30 * time.Second
|
||||
)
|
||||
|
||||
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, _ string) (net.Conn, error) {
|
||||
var dialer net.Dialer
|
||||
return dialer.DialContext(ctx, network, authority)
|
||||
return dialer.DialContext(ctx, network, address)
|
||||
}
|
||||
}
|
||||
|
||||
var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||
var newNetResolver = func(authority string) (netResolver, error) {
|
||||
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||
|
||||
return &net.Resolver{
|
||||
PreferGo: true,
|
||||
Dial: customAuthorityDialler(authorityWithPort),
|
||||
Dial: addressDialer(authorityWithPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -114,9 +115,10 @@ func NewBuilder() resolver.Builder {
|
||||
|
||||
type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of
|
||||
// the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||||
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -140,10 +142,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
if target.Authority == "" {
|
||||
if target.URL.Host == "" {
|
||||
d.resolver = defaultResolver
|
||||
} else {
|
||||
d.resolver, err = customAuthorityResolver(target.Authority)
|
||||
d.resolver, err = newNetResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -180,19 +182,22 @@ type dnsResolver struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the
|
||||
// target.
|
||||
rn chan struct{}
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
||||
// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
|
||||
// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
|
||||
// has data race with replaceNetFunc (WRITE the lookup function pointers).
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has
|
||||
// finished. Otherwise, data race will be possible. [Race Example] in
|
||||
// dns_resolver_test we replace the real lookup functions with mocked ones to
|
||||
// facilitate testing. If Close() doesn't wait for watcher() goroutine
|
||||
// finishes, race detector sometimes will warns lookup (READ the lookup
|
||||
// function pointers) inside watcher() goroutine has data race with
|
||||
// replaceNetFunc (WRITE the lookup function pointers).
|
||||
wg sync.WaitGroup
|
||||
disableServiceConfig bool
|
||||
}
|
||||
|
||||
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
|
||||
// ResolveNow invoke an immediate resolution of the target that this
|
||||
// dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {
|
||||
|
||||
var timer *time.Timer
|
||||
if err == nil {
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
|
||||
// to prevent constantly re-resolving.
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30
|
||||
// seconds at the very least to prevent constantly re-resolving.
|
||||
backoffIndex = 1
|
||||
timer = newTimerDNSResRate(minDNSResRate)
|
||||
select {
|
||||
@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
|
||||
case <-d.rn:
|
||||
}
|
||||
} else {
|
||||
// Poll on an error found in DNS Resolver or an error received from ClientConn.
|
||||
// Poll on an error found in DNS Resolver or an error received from
|
||||
// ClientConn.
|
||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
backoffIndex++
|
||||
}
|
||||
@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
dnsErr, ok := err.(*net.DNSError)
|
||||
if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
res += s
|
||||
}
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as
|
||||
// service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
// This is not an error; it is the equivalent of not having a service
|
||||
// config.
|
||||
return nil
|
||||
}
|
||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||
@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
// If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of
|
||||
// an IP address. If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and
|
||||
// ok = true.
|
||||
func formatIP(addr string) (addrIP string, ok bool) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
|
||||
return "[" + addr + "]", true
|
||||
}
|
||||
|
||||
// parseTarget takes the user input target string and default port, returns formatted host and port info.
|
||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
||||
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
|
||||
// are stripped when setting the host.
|
||||
// parseTarget takes the user input target string and default port, returns
|
||||
// formatted host and port info. If target doesn't specify a port, set the port
|
||||
// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
|
||||
// in square brackets, brackets are stripped when setting the host.
|
||||
// examples:
|
||||
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
||||
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
||||
@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||
if port == "" {
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:",
|
||||
// this is an error.
|
||||
return "", "", errEndsWithColon
|
||||
}
|
||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80",
|
||||
// the local system is assumed.
|
||||
host = "localhost"
|
||||
}
|
||||
return host, port, nil
|
||||
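As an illustration (not part of the vendored change), a simplified standalone sketch of the defaulting behaviour documented above, built on net.SplitHostPort; it skips the bare-IPv6 special case the real parseTarget also handles:

package main

import (
	"fmt"
	"net"
)

// hostPort applies the defaulting described above: targets without a port get
// the default, and a trailing colon with no port (e.g. "[::1]:") is an error.
func hostPort(target, defaultPort string) (string, string, error) {
	if host, port, err := net.SplitHostPort(target); err == nil {
		if port == "" {
			return "", "", fmt.Errorf("target %q ends with a colon but has no port", target)
		}
		if host == "" {
			host = "localhost" // consistent with net.Dial for ":80"
		}
		return host, port, nil
	}
	// No port present: fall back to the default.
	return target, defaultPort, nil
}

func main() {
	fmt.Println(hostPort("www.google.com", "443")) // www.google.com 443 <nil>
	fmt.Println(hostPort("ipv4-host:80", "443"))   // ipv4-host 80 <nil>
	fmt.Println(hostPort("[::1]:", "443"))         // error: ends with a colon
}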
11 src/runtime/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go generated vendored
@ -20,13 +20,20 @@
|
||||
// name without scheme back to gRPC as resolved address.
|
||||
package passthrough
|
||||
|
||||
import "google.golang.org/grpc/resolver"
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const scheme = "passthrough"
|
||||
|
||||
type passthroughBuilder struct{}
|
||||
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
if target.Endpoint() == "" && opts.Dialer == nil {
|
||||
return nil, errors.New("passthrough: received empty target in Build()")
|
||||
}
|
||||
r := &passthroughResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
@ -45,7 +52,7 @@ type passthroughResolver struct {
|
||||
}
|
||||
|
||||
func (r *passthroughResolver) start() {
|
||||
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
|
||||
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
|
||||
}
|
||||
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
|
||||
9 src/runtime/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go generated vendored
@ -34,8 +34,8 @@ type builder struct {
|
||||
}
|
||||
|
||||
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
if target.Authority != "" {
|
||||
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
|
||||
if target.URL.Host != "" {
|
||||
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
|
||||
}
|
||||
|
||||
// gRPC was parsing the dial target manually before PR #4817, and we
|
||||
@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv
|
||||
}
|
||||
addr := resolver.Address{Addr: endpoint}
|
||||
if b.scheme == unixAbstractScheme {
|
||||
// prepend "\x00" to address for unix-abstract
|
||||
addr.Addr = "\x00" + addr.Addr
|
||||
// We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do
|
||||
// not want trailing \0 in address.
|
||||
addr.Addr = "@" + addr.Addr
|
||||
}
|
||||
cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
|
||||
return &nopResolver{}, nil
|
||||
130 src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go generated vendored (new file)
@ -0,0 +1,130 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package serviceconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Duration defines JSON marshal and unmarshal methods to conform to the
|
||||
// protobuf JSON spec defined [here].
|
||||
//
|
||||
// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
|
||||
type Duration time.Duration
|
||||
|
||||
func (d Duration) String() string {
|
||||
return fmt.Sprint(time.Duration(d))
|
||||
}
|
||||
|
||||
// MarshalJSON converts from d to a JSON string output.
|
||||
func (d Duration) MarshalJSON() ([]byte, error) {
|
||||
ns := time.Duration(d).Nanoseconds()
|
||||
sec := ns / int64(time.Second)
|
||||
ns = ns % int64(time.Second)
|
||||
|
||||
var sign string
|
||||
if sec < 0 || ns < 0 {
|
||||
sign, sec, ns = "-", -1*sec, -1*ns
|
||||
}
|
||||
|
||||
// Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||
// depending on required precision.
|
||||
str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
|
||||
str = strings.TrimSuffix(str, "000")
|
||||
str = strings.TrimSuffix(str, "000")
|
||||
str = strings.TrimSuffix(str, ".000")
|
||||
return []byte(fmt.Sprintf("\"%ss\"", str)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals b as a duration JSON string into d.
|
||||
func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.HasSuffix(s, "s") {
|
||||
return fmt.Errorf("malformed duration %q: missing seconds unit", s)
|
||||
}
|
||||
neg := false
|
||||
if s[0] == '-' {
|
||||
neg = true
|
||||
s = s[1:]
|
||||
}
|
||||
ss := strings.SplitN(s[:len(s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return fmt.Errorf("malformed duration %q: too many decimals", s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var sec, ns int64
|
||||
if len(ss[0]) > 0 {
|
||||
var err error
|
||||
if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
|
||||
return fmt.Errorf("malformed duration %q: %v", s, err)
|
||||
}
|
||||
// Maximum seconds value per the durationpb spec.
|
||||
const maxProtoSeconds = 315_576_000_000
|
||||
if sec > maxProtoSeconds {
|
||||
return fmt.Errorf("out of range: %q", s)
|
||||
}
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
|
||||
}
|
||||
var err error
|
||||
if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
|
||||
return fmt.Errorf("malformed duration %q: %v", s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
ns *= 10
|
||||
}
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return fmt.Errorf("malformed duration %q: contains no numbers", s)
|
||||
}
|
||||
|
||||
if neg {
|
||||
sec *= -1
|
||||
ns *= -1
|
||||
}
|
||||
|
||||
// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
|
||||
const maxSeconds = math.MaxInt64 / int64(time.Second)
|
||||
const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
|
||||
const minSeconds = math.MinInt64 / int64(time.Second)
|
||||
const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
|
||||
|
||||
if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
|
||||
*d = Duration(math.MaxInt64)
|
||||
} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
|
||||
*d = Duration(math.MinInt64)
|
||||
} else {
|
||||
*d = Duration(sec*int64(time.Second) + ns)
|
||||
}
|
||||
return nil
|
||||
}
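As an illustration (not part of the vendored change), the duration strings handled above are plain seconds-with-fraction values ending in "s", which the standard library can also parse; a minimal round-trip sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	// MarshalJSON above always emits 0, 3, 6, or 9 fractional digits.
	d := 1500 * time.Millisecond
	s := fmt.Sprintf("%d.%03ds", d/time.Second, (d%time.Second)/time.Millisecond)
	fmt.Println(s) // 1.500s

	// The emitted string parses back to the same value with the standard library.
	parsed, err := time.ParseDuration(s)
	fmt.Println(parsed == d, err) // true <nil>
}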
8 src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go generated vendored
@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
|
||||
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
|
||||
// config. This method iterates through that list in order, and stops at the
|
||||
// first policy that is supported.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
var ir intermediateBalancerConfig
|
||||
err := json.Unmarshal(b, &ir)
|
||||
18 src/runtime/vendor/google.golang.org/grpc/internal/status/status.go generated vendored
@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status {
|
||||
}
|
||||
|
||||
// Newf returns New(c, fmt.Sprintf(format, a...)).
|
||||
func Newf(c codes.Code, format string, a ...interface{}) *Status {
|
||||
func Newf(c codes.Code, format string, a ...any) *Status {
|
||||
return New(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error {
|
||||
}
|
||||
|
||||
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
func Errorf(c codes.Code, format string, a ...any) error {
|
||||
return Err(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
|
||||
|
||||
// Details returns a slice of details messages attached to the status.
|
||||
// If a detail cannot be decoded, the error is returned in place of the detail.
|
||||
func (s *Status) Details() []interface{} {
|
||||
func (s *Status) Details() []any {
|
||||
if s == nil || s.s == nil {
|
||||
return nil
|
||||
}
|
||||
details := make([]interface{}, 0, len(s.s.Details))
|
||||
details := make([]any, 0, len(s.s.Details))
|
||||
for _, any := range s.s.Details {
|
||||
detail := &ptypes.DynamicAny{}
|
||||
if err := ptypes.UnmarshalAny(any, detail); err != nil {
|
||||
@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
|
||||
}
|
||||
return proto.Equal(e.s.s, tse.s.s)
|
||||
}
|
||||
|
||||
// IsRestrictedControlPlaneCode returns whether the status includes a code
|
||||
// restricted for control plane usage as defined by gRFC A54.
|
||||
func IsRestrictedControlPlaneCode(s *Status) bool {
|
||||
switch s.Code() {
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
147 src/runtime/vendor/google.golang.org/grpc/internal/transport/controlbuf.go generated vendored
@ -22,6 +22,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
@ -29,6 +30,7 @@ import (
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@ -38,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
}
|
||||
|
||||
type itemNode struct {
|
||||
it interface{}
|
||||
it any
|
||||
next *itemNode
|
||||
}
|
||||
|
||||
@ -47,7 +49,7 @@ type itemList struct {
|
||||
tail *itemNode
|
||||
}
|
||||
|
||||
func (il *itemList) enqueue(i interface{}) {
|
||||
func (il *itemList) enqueue(i any) {
|
||||
n := &itemNode{it: i}
|
||||
if il.tail == nil {
|
||||
il.head, il.tail = n, n
|
||||
@ -59,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) {
|
||||
|
||||
// peek returns the first item in the list without removing it from the
|
||||
// list.
|
||||
func (il *itemList) peek() interface{} {
|
||||
func (il *itemList) peek() any {
|
||||
return il.head.it
|
||||
}
|
||||
|
||||
func (il *itemList) dequeue() interface{} {
|
||||
func (il *itemList) dequeue() any {
|
||||
if il.head == nil {
|
||||
return nil
|
||||
}
|
||||
@ -191,7 +193,7 @@ type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
||||
}
|
||||
|
||||
func (*goAway) isTransportResponseFrame() bool { return false }
|
||||
@ -209,6 +211,14 @@ type outFlowControlSizeRequest struct {
|
||||
|
||||
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
|
||||
|
||||
// closeConnection is an instruction to tell the loopy writer to flush the
|
||||
// framer and exit, which will cause the transport's connection to be closed
|
||||
// (by the client or server). The transport itself will close after the reader
|
||||
// encounters the EOF caused by the connection closure.
|
||||
type closeConnection struct{}
|
||||
|
||||
func (closeConnection) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
@ -326,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
|
||||
func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
@ -363,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
|
||||
}
|
||||
|
||||
// Note argument f should never be nil.
|
||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
@ -377,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
func (c *controlBuffer) get(block bool) (any, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
@ -408,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
return nil, ErrConnClosing
|
||||
return nil, errors.New("transport closed by client")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -478,12 +488,14 @@ type loopyWriter struct {
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
conn net.Conn
|
||||
logger *grpclog.PrefixLogger
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
@ -496,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
conn: conn,
|
||||
logger: logger,
|
||||
}
|
||||
return l
|
||||
}
|
||||
@ -513,23 +527,26 @@ const minBatchSize = 1000
|
||||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
||||
// if the batch size is too low to give stream goroutines a chance to fill it up.
|
||||
// frame, loopy calls processData, which processes one node from the
|
||||
// activeStreams linked-list. This results in writing of HTTP2 frames into an
|
||||
// underlying write buffer. When there's no more control frames to read from
|
||||
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
|
||||
// the batch size for each flush, loopy yields the processor, once if the batch
|
||||
// size is too low to give stream goroutines a chance to fill it up.
|
||||
//
|
||||
// Upon exiting, if the error causing the exit is not an I/O error, run()
|
||||
// flushes and closes the underlying connection. Otherwise, the connection is
|
||||
// left open to allow the I/O error to be encountered by the reader instead.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if err == ErrConnClosing {
|
||||
// Don't log ErrConnClosing as error since it happens
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: loopyWriter.run returning. %v", err)
|
||||
}
|
||||
err = nil
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("loopyWriter exiting with error: %v", err)
|
||||
}
|
||||
if !isIOError(err) {
|
||||
l.framer.writer.Flush()
|
||||
l.conn.Close()
|
||||
}
|
||||
l.cbuf.finish()
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
@ -574,7 +591,6 @@ func (l *loopyWriter) run() (err error) {
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -583,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
|
||||
// Otherwise update the quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
@ -595,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
@ -606,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
l.applySettings(s.ss)
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
@ -620,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -655,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
str.itl.enqueue(h)
|
||||
return l.originateStream(str)
|
||||
return l.originateStream(str, h)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors(errStreamDrain) need not close transport.
|
||||
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
|
||||
// l.draining is set when handling GoAway. In which case, we want to avoid
|
||||
// creating new streams.
|
||||
if l.draining {
|
||||
// TODO: provide a better error with the reason we are in draining.
|
||||
hdr.onOrphaned(errStreamDrain)
|
||||
return nil
|
||||
}
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -682,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Warningf("Encountered error while encoding headers: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -721,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
@ -733,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
@ -744,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
@ -763,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
if l.draining && len(l.estdStreams) == 0 {
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("finished processing active streams while in draining mode")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -799,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("received GOAWAY with no active streams")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -817,10 +830,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
func (l *loopyWriter) handle(i any) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
@ -830,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *earlyAbortStream:
|
||||
@ -838,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
l.outFlowControlSizeRequestHandler(i)
|
||||
case closeConnection:
|
||||
// Just return a non-I/O error and run() will flush and close the
|
||||
// connection.
|
||||
return ErrConnClosing
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
@ -869,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
@ -886,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
|
||||
// A data item is represented by a dataFrame, since it later translates into
|
||||
// multiple HTTP2 data frames.
|
||||
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
|
||||
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
|
||||
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
|
||||
// maximum possilbe HTTP2 frame size.
|
||||
// maximum possible HTTP2 frame size.
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
||||
// Client sends out empty data frame with endStream = true
|
||||
@ -903,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, nil
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
6 src/runtime/vendor/google.golang.org/grpc/internal/transport/defaults.go generated vendored
@ -47,3 +47,9 @@ const (
|
||||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||||
)
|
||||
|
||||
// MaxStreamID is the upper bound for the stream ID before the current
|
||||
// transport gracefully closes and new transport is created for subsequent RPCs.
|
||||
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
|
||||
// integer. It's exported so that tests can override it.
|
||||
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
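As an illustration (not part of the vendored change), the new default works out to 1610612735, i.e. three quarters of the unsigned 31-bit stream-ID space, after which the client transport is gracefully replaced:

package main

import (
	"fmt"
	"math"
)

func main() {
	// 2^31-1 = 2147483647; three quarters of that, truncated, is the default cap.
	fmt.Println(uint32(math.MaxInt32 * 3 / 4)) // 1610612735
}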
|
||||
80 src/runtime/vendor/google.golang.org/grpc/internal/transport/handler_server.go generated vendored
@ -39,6 +39,7 @@ import (
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
@ -46,24 +47,32 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
||||
// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
||||
// It requires that the http Server supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
msg := "gRPC requires HTTP/2"
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
||||
if !validContentType {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
|
||||
http.Error(w, msg, http.StatusUnsupportedMediaType)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
||||
http.Error(w, msg, http.StatusInternalServerError)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
st.logger = prefixLoggerForServerHandlerTransport(st)
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||||
msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
for _, v := range vv {
|
||||
v, err := decodeMetadataHeader(k, v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||
msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
@ -138,15 +152,19 @@ type serverHandlerTransport struct {
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
func (ht *serverHandlerTransport) Close(err error) {
|
||||
ht.closeOnce.Do(func() {
|
||||
if ht.logger.V(logLevel) {
|
||||
ht.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
close(ht.closedCh)
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
@ -228,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
for _, sh := range ht.stats {
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
ht.Close()
|
||||
ht.Close(errors.New("finished writing status"))
|
||||
return err
|
||||
}
|
||||
|
||||
@ -314,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
for _, sh := range ht.stats {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
sh.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
case <-ht.req.Context().Done():
|
||||
}
|
||||
cancel()
|
||||
ht.Close()
|
||||
ht.Close(errors.New("request is done processing"))
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
@ -369,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
@ -435,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
func (ht *serverHandlerTransport) Drain(debugData string) {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
// mapRecvMsgError returns the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, in can only be:
|
||||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * an error from the status package
|
||||
// - io.EOF
|
||||
// - io.ErrUnexpectedEOF
|
||||
// - of type transport.ConnectionError
|
||||
// - an error from the status package
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
432 src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_client.go generated vendored
@ -38,8 +38,11 @@ import (
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
istatus "google.golang.org/grpc/internal/status"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/internal/transport/networktype"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@ -57,11 +60,15 @@ var clientConnectionCounter uint64
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
userAgent string
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
userAgent string
|
||||
// address contains the resolver returned address for this transport.
|
||||
// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
|
||||
// passed to `NewStream`, when determining the :authority header.
|
||||
address resolver.Address
|
||||
md metadata.MD
|
||||
conn net.Conn // underlying communication channel
|
||||
loopy *loopyWriter
|
||||
@ -78,6 +85,7 @@ type http2Client struct {
|
||||
framer *framer
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
// Do not access controlBuf with mu held.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
// The scheme used: https if TLS is on, http otherwise.
|
||||
@ -90,7 +98,7 @@ type http2Client struct {
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
statsHandler stats.Handler
|
||||
statsHandlers []stats.Handler
|
||||
|
||||
initialWindowSize int32
|
||||
|
||||
@ -98,17 +106,15 @@ type http2Client struct {
|
||||
maxSendHeaderListSize *uint32
|
||||
|
||||
bdpEst *bdpEstimator
|
||||
// onPrefaceReceipt is a callback that client transport calls upon
|
||||
// receiving server preface to signal that a succefull HTTP2
|
||||
// connection was established.
|
||||
onPrefaceReceipt func()
|
||||
|
||||
maxConcurrentStreams uint32
|
||||
streamQuota int64
|
||||
streamsQuotaAvailable chan struct{}
|
||||
waitingStreams uint32
|
||||
nextID uint32
|
||||
registeredCompressors string
|
||||
|
||||
// Do not access controlBuf with mu held.
|
||||
mu sync.Mutex // guard the following variables
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
@ -135,12 +141,12 @@ type http2Client struct {
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
|
||||
onGoAway func(GoAwayReason)
|
||||
onClose func()
|
||||
onClose func(GoAwayReason)
|
||||
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
|
||||
@ -192,7 +198,7 @@ func isTemporary(err error) bool {
|
||||
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
||||
// and starts to receive messages on it. Non-nil error returns if construction
|
||||
// fails.
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
|
||||
scheme := "http"
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
@ -212,14 +218,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
if opts.FailOnNonTempDialError {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
|
||||
}
|
||||
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
|
||||
return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
|
||||
}
|
||||
|
||||
// Any further errors will close the underlying connection
|
||||
defer func(conn net.Conn) {
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
}(conn)
|
||||
|
||||
// The following defer and goroutine monitor the connectCtx for cancelation
|
||||
// and deadline. On context expiration, the connection is hard closed and
|
||||
// this function will naturally fail as a result. Otherwise, the defer
|
||||
// waits for the goroutine to exit to prevent the context from being
|
||||
// monitored (and to prevent the connection from ever being closed) after
|
||||
// returning from this function.
|
||||
ctxMonitorDone := grpcsync.NewEvent()
|
||||
newClientCtx, newClientDone := context.WithCancel(connectCtx)
|
||||
defer func() {
|
||||
newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
|
||||
<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
|
||||
}()
|
||||
go func(conn net.Conn) {
|
||||
defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
|
||||
<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
|
||||
if err := connectCtx.Err(); err != nil {
|
||||
// connectCtx expired before exiting the function. Hard close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Aborting due to connect deadline expiring: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
}(conn)
|
||||
|
||||
kp := opts.KeepaliveParams
|
||||
// Validate keepalive parameters.
|
||||
if kp.Time == 0 {
|
||||
@ -251,15 +283,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
}
|
||||
}
|
||||
if transportCreds != nil {
|
||||
rawConn := conn
|
||||
// Pull the deadline from the connectCtx, which will be used for
|
||||
// timeouts in the authentication protocol handshake. Can ignore the
|
||||
// boolean as the deadline will return the zero value, which will make
|
||||
// the conn not timeout on I/O operations.
|
||||
deadline, _ := connectCtx.Deadline()
|
||||
rawConn.SetDeadline(deadline)
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
||||
}
|
||||
@ -297,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
ctxDone: ctx.Done(), // Cache Done chan.
|
||||
cancel: cancel,
|
||||
userAgent: opts.UserAgent,
|
||||
registeredCompressors: grpcutil.RegisteredCompressors(),
|
||||
address: addr,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
@ -304,26 +330,27 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
|
||||
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
scheme: scheme,
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
isSecure: isSecure,
|
||||
perRPCCreds: perRPCCreds,
|
||||
kp: kp,
|
||||
statsHandler: opts.StatsHandler,
|
||||
statsHandlers: opts.StatsHandlers,
|
||||
initialWindowSize: initialWindowSize,
|
||||
onPrefaceReceipt: onPrefaceReceipt,
|
||||
nextID: 1,
|
||||
maxConcurrentStreams: defaultMaxStreamsClient,
|
||||
streamQuota: defaultMaxStreamsClient,
|
||||
streamsQuotaAvailable: make(chan struct{}, 1),
|
||||
czData: new(channelzData),
|
||||
onGoAway: onGoAway,
|
||||
onClose: onClose,
|
||||
keepaliveEnabled: keepaliveEnabled,
|
||||
bufferPool: newBufferPool(),
|
||||
onClose: onClose,
|
||||
}
|
||||
t.logger = prefixLoggerForClientTransport(t)
|
||||
// Add peer information to the http2client context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
if md, ok := addr.Metadata.(*metadata.MD); ok {
|
||||
t.md = *md
|
||||
@ -341,15 +368,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.statsHandlers {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
if err != nil {
|
||||
@ -359,21 +386,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
go t.keepalive()
|
||||
}
|
||||
// Start the reader goroutine for incoming message. Each transport has
|
||||
// a dedicated goroutine which reads HTTP2 frame from network. Then it
|
||||
// dispatches the frame to the corresponding stream entity.
|
||||
go t.reader()
|
||||
|
||||
// Start the reader goroutine for incoming messages. Each transport has a
|
||||
// dedicated goroutine which reads HTTP2 frames from the network. Then it
|
||||
// dispatches the frame to the corresponding stream entity. When the
|
||||
// server preface is received, readerErrCh is closed. If an error occurs
|
||||
// first, an error is pushed to the channel. This must be checked before
|
||||
// returning from this function.
|
||||
readerErrCh := make(chan error, 1)
|
||||
go t.reader(readerErrCh)
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = <-readerErrCh
|
||||
}
|
||||
if err != nil {
|
||||
t.Close(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Send connection preface to server.
|
||||
n, err := t.conn.Write(clientPreface)
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
if n != len(clientPreface) {
|
||||
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
var ss []http2.Setting
|
||||
@ -393,14 +431,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
err = t.framer.fr.WriteSettings(ss...)
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
||||
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -411,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
err := t.loopy.run()
|
||||
if err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
// Do not close the transport. Let reader goroutine handle it since
|
||||
// there might be data in the buffers.
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
return t, nil
|
||||
@ -467,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -503,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
|
||||
}
|
||||
|
||||
registeredCompressors := t.registeredCompressors
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||||
// Include the outgoing compressor name when compressor is not registered
|
||||
// via encoding.RegisterCompressor. This is possible when client uses
|
||||
// WithCompressor dial option.
|
||||
if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
|
||||
if registeredCompressors != "" {
|
||||
registeredCompressors += ","
|
||||
}
|
||||
registeredCompressors += callHdr.SendCompress
|
||||
}
|
||||
}
|
||||
|
||||
if registeredCompressors != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
@ -585,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
|
||||
for _, c := range t.perRPCCreds {
|
||||
data, err := c.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
if _, ok := status.FromError(err); ok {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -614,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
}
|
||||
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "transport: %v", err)
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
|
||||
}
|
||||
callAuthData = make(map[string]string, len(data))
|
||||
for k, v := range data {
|
||||
@ -630,13 +681,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
// NewStream errors result in transparent retry, as they mean nothing went onto
|
||||
// the wire. However, there are two notable exceptions:
|
||||
//
|
||||
// 1. If the stream headers violate the max header list size allowed by the
|
||||
// server. It's possible this could succeed on another transport, even if
|
||||
// it's unlikely, but do not transparently retry.
|
||||
// 2. If the credentials errored when requesting their headers. In this case,
|
||||
// it's possible a retry can fix the problem, but indefinitely transparently
|
||||
// retrying is not appropriate as it is likely the credentials, if they can
|
||||
// eventually succeed, would need I/O to do so.
|
||||
// 1. If the stream headers violate the max header list size allowed by the
|
||||
// server. It's possible this could succeed on another transport, even if
|
||||
// it's unlikely, but do not transparently retry.
|
||||
// 2. If the credentials errored when requesting their headers. In this case,
|
||||
// it's possible a retry can fix the problem, but indefinitely transparently
|
||||
// retrying is not appropriate as it is likely the credentials, if they can
|
||||
// eventually succeed, would need I/O to do so.
|
||||
type NewStreamError struct {
|
||||
Err error
|
||||
|
||||
@ -651,6 +702,18 @@ func (e NewStreamError) Error() string {
|
||||
// streams. All non-nil errors returned will be *NewStreamError.
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
||||
ctx = peer.NewContext(ctx, t.getPeer())
|
||||
|
||||
// ServerName field of the resolver returned address takes precedence over
|
||||
// Host field of CallHdr to determine the :authority header. This is because,
|
||||
// the ServerName field takes precedence for server authentication during
|
||||
// TLS handshake, and the :authority header should match the value used
|
||||
// for server authentication.
|
||||
if t.address.ServerName != "" {
|
||||
newCallHdr := *callHdr
|
||||
newCallHdr.Host = t.address.ServerName
|
||||
callHdr = &newCallHdr
|
||||
}
|
||||
|
||||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
||||
@ -675,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
endStream: false,
|
||||
initStream: func(id uint32) error {
|
||||
t.mu.Lock()
|
||||
if state := t.state; state != reachable {
|
||||
// TODO: handle transport closure in loopy instead and remove this
|
||||
// initStream is never called when transport is draining.
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
// Do a quick cleanup.
|
||||
err := error(errStreamDrain)
|
||||
if state == closing {
|
||||
err = ErrConnClosing
|
||||
}
|
||||
cleanup(err)
|
||||
return err
|
||||
cleanup(ErrConnClosing)
|
||||
return ErrConnClosing
|
||||
}
|
||||
t.activeStreams[id] = s
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
@ -702,7 +761,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
}
|
||||
firstTry := true
|
||||
var ch chan struct{}
|
||||
checkForStreamQuota := func(it interface{}) bool {
|
||||
transportDrainRequired := false
|
||||
checkForStreamQuota := func(it any) bool {
|
||||
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
||||
if firstTry {
|
||||
t.waitingStreams++
|
||||
@ -717,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
h := it.(*headerFrame)
|
||||
h.streamID = t.nextID
|
||||
t.nextID += 2
|
||||
|
||||
// Drain client transport if nextID > MaxStreamID which signals gRPC that
|
||||
// the connection is closed and a new one must be created for subsequent RPCs.
|
||||
transportDrainRequired = t.nextID > MaxStreamID
|
||||
|
||||
s.id = h.streamID
|
||||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||||
t.mu.Lock()
|
||||
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
|
||||
t.mu.Unlock()
|
||||
return false // Don't create a stream if the transport is already closed.
|
||||
}
|
||||
t.activeStreams[s.id] = s
|
||||
t.mu.Unlock()
|
||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||
select {
|
||||
case t.streamsQuotaAvailable <- struct{}{}:
|
||||
@ -728,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return true
|
||||
}
|
||||
var hdrListSizeErr error
|
||||
checkForHeaderListSize := func(it interface{}) bool {
|
||||
checkForHeaderListSize := func(it any) bool {
|
||||
if t.maxSendHeaderListSize == nil {
|
||||
return true
|
||||
}
|
||||
@ -743,14 +815,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return true
|
||||
}
|
||||
for {
|
||||
success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
|
||||
if !checkForStreamQuota(it) {
|
||||
return false
|
||||
}
|
||||
if !checkForHeaderListSize(it) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
success, err := t.controlBuf.executeAndPut(func(it any) bool {
|
||||
return checkForHeaderListSize(it) && checkForStreamQuota(it)
|
||||
}, hdr)
|
||||
if err != nil {
|
||||
// Connection closed.
|
||||
@ -773,24 +839,33 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
if len(t.statsHandlers) != 0 {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
for _, sh := range t.statsHandlers {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
// Note: Creating a new stats object to prevent pollution.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
if transportDrainRequired {
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
|
||||
}
|
||||
t.GracefulClose()
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
@ -852,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
rst: rst,
|
||||
rstCode: rstCode,
|
||||
}
|
||||
addBackStreamQuota := func(interface{}) bool {
|
||||
addBackStreamQuota := func(any) bool {
|
||||
t.streamQuota++
|
||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||
select {
|
||||
@ -873,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
// Close kicks off the shutdown process of the transport. This should be called
|
||||
// only once on a transport. Once it is called, the transport should not be
|
||||
// accessed any more.
|
||||
//
|
||||
// This method blocks until the addrConn that initiated this transport is
|
||||
// re-connected. This happens because t.onClose() begins reconnect logic at the
|
||||
// addrConn level and blocks until the addrConn is successfully connected.
|
||||
func (t *http2Client) Close(err error) {
|
||||
t.mu.Lock()
|
||||
// Make sure we only Close once.
|
||||
// Make sure we only close once.
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Call t.onClose before setting the state to closing to prevent the client
|
||||
// from attempting to create new streams ASAP.
|
||||
t.onClose()
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
// Call t.onClose ASAP to prevent the client from attempting to create new
|
||||
// streams.
|
||||
if t.state != draining {
|
||||
t.onClose(GoAwayInvalid)
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
@ -916,11 +992,11 @@ func (t *http2Client) Close(err error) {
|
||||
for _, s := range streams {
|
||||
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
for _, sh := range t.statsHandlers {
|
||||
connEnd := &stats.ConnEnd{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
@ -936,11 +1012,15 @@ func (t *http2Client) GracefulClose() {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("GracefulClose called")
|
||||
}
|
||||
t.onClose(GoAwayInvalid)
|
||||
t.state = draining
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close(ErrConnClosing)
|
||||
t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
|
||||
return
|
||||
}
|
||||
t.controlBuf.put(&incomingGoAway{})
|
||||
@ -1000,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
||||
// for the transport and the stream based on the current bdp
|
||||
// estimation.
|
||||
func (t *http2Client) updateFlowControl(n uint32) {
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
s.fc.newLimit(n)
|
||||
}
|
||||
t.mu.Unlock()
|
||||
updateIWS := func(interface{}) bool {
|
||||
updateIWS := func(any) bool {
|
||||
t.initialWindowSize = int32(n)
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
s.fc.newLimit(n)
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return true
|
||||
}
|
||||
t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
|
||||
@ -1097,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
}
|
||||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
|
||||
}
|
||||
statusCode = codes.Unknown
|
||||
}
|
||||
@ -1153,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
|
||||
}
|
||||
updateFuncs = append(updateFuncs, updateStreamQuota)
|
||||
}
|
||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
||||
t.controlBuf.executeAndPut(func(any) bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
@ -1180,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
|
||||
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
|
||||
// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
|
||||
// enabled by default and double the configure KEEPALIVE_TIME used for new connections
|
||||
// on that channel.
|
||||
logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
|
||||
}
|
||||
id := f.LastStreamID
|
||||
if id > 0 && id%2 == 0 {
|
||||
@ -1212,12 +1294,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
default:
|
||||
t.setGoAwayReason(f)
|
||||
close(t.goAway)
|
||||
t.controlBuf.put(&incomingGoAway{})
|
||||
defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
|
||||
// Notify the clientconn about the GOAWAY before we set the state to
|
||||
// draining, to allow the client to stop attempting to create streams
|
||||
// before disallowing new streams on this connection.
|
||||
t.onGoAway(t.goAwayReason)
|
||||
t.state = draining
|
||||
if t.state != draining {
|
||||
t.onClose(t.goAwayReason)
|
||||
t.state = draining
|
||||
}
|
||||
}
|
||||
// All streams with IDs greater than the GoAwayId
|
||||
// and smaller than the previous GoAway ID should be killed.
|
||||
@ -1225,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
if upperLimit == 0 { // This is the first GoAway Frame.
|
||||
upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
|
||||
}
|
||||
|
||||
t.prevGoAwayID = id
|
||||
if len(t.activeStreams) == 0 {
|
||||
t.mu.Unlock()
|
||||
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
||||
return
|
||||
}
|
||||
|
||||
streamsToClose := make([]*Stream, 0)
|
||||
for streamID, stream := range t.activeStreams {
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
// The stream was unprocessed by the server.
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
streamsToClose = append(streamsToClose, stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
t.prevGoAwayID = id
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
||||
// Called outside t.mu because closeStream can take controlBuf's mu, which
|
||||
// could induce deadlock and is not allowed.
|
||||
for _, stream := range streamsToClose {
|
||||
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
// setGoAwayReason sets the value of t.goAwayReason based
|
||||
// on the GoAway frame received.
|
||||
// It expects a lock on transport's mutext to be held by
|
||||
// It expects a lock on transport's mutex to be held by
|
||||
// the caller.
|
||||
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
|
||||
t.goAwayReason = GoAwayNoReason
|
||||
@ -1410,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
return
|
||||
}
|
||||
|
||||
isHeader := false
|
||||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
s.headerValid = true
|
||||
if !endStream {
|
||||
// HEADERS frame block carries a Response-Headers.
|
||||
isHeader = true
|
||||
// For headers, set them in s.header and close headerChan. For trailers or
|
||||
// trailers-only, closeStream will set the trailers and close headerChan as
|
||||
// needed.
|
||||
if !endStream {
|
||||
// If headerChan hasn't been closed yet (expected, given we checked it
|
||||
// above, but something else could have potentially closed the whole
|
||||
// stream).
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
s.headerValid = true
|
||||
// These values can be set without any synchronization because
|
||||
// stream goroutine will read it only after seeing a closed
|
||||
// headerChan which we'll close after setting this.
|
||||
@ -1425,29 +1521,26 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
if len(mdata) > 0 {
|
||||
s.header = mdata
|
||||
}
|
||||
} else {
|
||||
// HEADERS frame block carries a Trailers-Only.
|
||||
s.noHeaders = true
|
||||
close(s.headerChan)
|
||||
}
|
||||
close(s.headerChan)
|
||||
}
|
||||
|
||||
if t.statsHandler != nil {
|
||||
if isHeader {
|
||||
for _, sh := range t.statsHandlers {
|
||||
if !endStream {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: metadata.MD(mdata).Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
sh.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1459,38 +1552,41 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
statusGen = status.New(rawStatusCode, grpcMessage)
|
||||
}
|
||||
|
||||
// if client received END_STREAM from server while stream was still active, send RST_STREAM
|
||||
rst := s.getState() == streamActive
|
||||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
||||
// If client received END_STREAM from server while stream was still active,
|
||||
// send RST_STREAM.
|
||||
rstStream := s.getState() == streamActive
|
||||
t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
|
||||
}
|
||||
|
||||
// reader runs as a separate goroutine in charge of reading data from network
|
||||
// connection.
|
||||
//
|
||||
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
|
||||
// optimal.
|
||||
// TODO(zhaoq): Check the validity of the incoming frame sequence.
|
||||
func (t *http2Client) reader() {
|
||||
defer close(t.readerDone)
|
||||
// Check the validity of server preface.
|
||||
// readServerPreface reads and handles the initial settings frame from the
|
||||
// server.
|
||||
func (t *http2Client) readServerPreface() error {
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "error reading server preface: %v", err)
|
||||
t.Close(err) // this kicks off resetTransport, so must be last before return
|
||||
return
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
return connectionErrorf(true, err, "error reading server preface: %v", err)
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
// this kicks off resetTransport, so must be last before return
|
||||
t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
|
||||
return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
|
||||
}
|
||||
t.handleSettings(sf, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reader verifies the server preface and reads all subsequent data from
|
||||
// network connection. If the server preface is not read successfully, an
|
||||
// error is pushed to errCh; otherwise errCh is closed with no error.
|
||||
func (t *http2Client) reader(errCh chan<- error) {
|
||||
defer close(t.readerDone)
|
||||
|
||||
if err := t.readServerPreface(); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
t.onPrefaceReceipt()
|
||||
t.handleSettings(sf, true)
|
||||
close(errCh)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// loop to keep reading incoming messages on this transport.
|
||||
for {
|
||||
@ -1693,3 +1789,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
return -2
}
}

func (t *http2Client) stateForTesting() transportState {
t.mu.Lock()
defer t.mu.Unlock()
return t.state
}
305 src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@ -21,6 +21,7 @@ package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
@ -34,13 +35,16 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"

"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -82,7 +86,7 @@ type http2Server struct {
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
// Keepalive enforcement policy.
|
||||
@ -101,13 +105,13 @@ type http2Server struct {
|
||||
|
||||
mu sync.Mutex // guard the following
|
||||
|
||||
// drainChan is initialized when Drain() is called the first time.
|
||||
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
|
||||
// Then an independent goroutine will be launched to later send the second GoAway.
|
||||
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
|
||||
// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
|
||||
// already underway.
|
||||
drainChan chan struct{}
|
||||
// drainEvent is initialized when Drain() is called the first time. After
|
||||
// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
|
||||
// an independent goroutine will be launched to later send the second
|
||||
// GoAway. During this time we don't want to write another first GoAway(with
|
||||
// ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
|
||||
// already initialized since draining is already underway.
|
||||
drainEvent *grpcsync.Event
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
// idle is the time instant when the connection went idle.
|
||||
@ -127,6 +131,8 @@ type http2Server struct {
|
||||
// This lock may not be taken if mu is already held.
|
||||
maxStreamMu sync.Mutex
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
// NewServerTransport creates a http2 transport with conn and configuration
|
||||
@ -159,21 +165,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
if config.MaxHeaderListSize != nil {
|
||||
maxHeaderListSize = *config.MaxHeaderListSize
|
||||
}
|
||||
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
|
||||
framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
|
||||
// Send initial settings as connection preface to client.
|
||||
isettings := []http2.Setting{{
|
||||
ID: http2.SettingMaxFrameSize,
|
||||
Val: http2MaxFrameLen,
|
||||
}}
|
||||
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
|
||||
// permitted in the HTTP2 spec.
|
||||
maxStreams := config.MaxStreams
|
||||
if maxStreams == 0 {
|
||||
maxStreams = math.MaxUint32
|
||||
} else {
|
||||
if config.MaxStreams != math.MaxUint32 {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingMaxConcurrentStreams,
|
||||
Val: maxStreams,
|
||||
Val: config.MaxStreams,
|
||||
})
|
||||
}
|
||||
dynamicWindow := true
|
||||
@ -232,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
kp.Timeout = defaultServerKeepaliveTimeout
|
||||
}
|
||||
if kp.Time != infinity {
|
||||
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||
if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||
}
|
||||
}
|
||||
@ -252,12 +253,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
framer: framer,
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
maxStreams: maxStreams,
|
||||
maxStreams: config.MaxStreams,
|
||||
inTapHandle: config.InTapHandle,
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
state: reachable,
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
stats: config.StatsHandler,
|
||||
stats: config.StatsHandlers,
|
||||
kp: kp,
|
||||
idle: time.Now(),
|
||||
kep: kep,
|
||||
@ -265,6 +266,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
t.logger = prefixLoggerForServerTransport(t)
|
||||
// Add peer information to the http2server context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
t.controlBuf = newControlBuffer(t.done)
|
||||
if dynamicWindow {
|
||||
t.bdpEst = &bdpEstimator{
|
||||
@ -272,13 +277,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.stats != nil {
|
||||
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.stats {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{}
|
||||
t.stats.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
if err != nil {
|
||||
@ -290,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -326,23 +331,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
t.handleSettings(sf)
|
||||
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
if err := t.loopy.run(); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
go t.keepalive()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
|
||||
// operateHeaders takes action on the decoded headers. Returns an error if fatal
|
||||
// error encountered and transport needs to close, otherwise returns nil.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
|
||||
// Acquire max stream ID lock for entire duration
|
||||
t.maxStreamMu.Lock()
|
||||
defer t.maxStreamMu.Unlock()
|
||||
@ -358,15 +358,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
rstCode: http2.ErrCodeFrameSize,
|
||||
onWrite: func() {},
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
// illegal gRPC stream id.
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
}
|
||||
return true
|
||||
return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
|
||||
}
|
||||
t.maxStreamID = streamID
|
||||
|
||||
@ -378,13 +375,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
}
|
||||
var (
|
||||
// If a gRPC Response-Headers has already been received, then it means
|
||||
// that the peer is speaking gRPC and we are in gRPC mode.
|
||||
isGRPC = false
|
||||
mdata = make(map[string][]string)
|
||||
httpMethod string
|
||||
// headerError is set if an error is encountered while parsing the headers
|
||||
headerError bool
|
||||
// if false, content-type was missing or invalid
|
||||
isGRPC = false
|
||||
contentType = ""
|
||||
mdata = make(metadata.MD, len(frame.Fields))
|
||||
httpMethod string
|
||||
// these are set if an error is encountered while parsing the headers
|
||||
protocolError bool
|
||||
headerError *status.Status
|
||||
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
@ -395,11 +393,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
case "content-type":
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
|
||||
if !validContentType {
|
||||
contentType = hf.Value
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
s.contentSubtype = contentSubtype
|
||||
isGRPC = true
|
||||
|
||||
case "grpc-accept-encoding":
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
if hf.Value == "" {
|
||||
continue
|
||||
}
|
||||
compressors := hf.Value
|
||||
if s.clientAdvertisedCompressors != "" {
|
||||
compressors = s.clientAdvertisedCompressors + "," + compressors
|
||||
}
|
||||
s.clientAdvertisedCompressors = compressors
|
||||
case "grpc-encoding":
|
||||
s.recvCompress = hf.Value
|
||||
case ":method":
|
||||
@ -410,23 +420,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
timeoutSet = true
|
||||
var err error
|
||||
if timeout, err = decodeTimeout(hf.Value); err != nil {
|
||||
headerError = true
|
||||
headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
|
||||
}
|
||||
// "Transports must consider requests containing the Connection header
|
||||
// as malformed." - A41
|
||||
case "connection":
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
|
||||
}
|
||||
headerError = true
|
||||
protocolError = true
|
||||
default:
|
||||
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||||
if err != nil {
|
||||
headerError = true
|
||||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
|
||||
t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], v)
|
||||
@ -440,27 +450,47 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
// error, this takes precedence over a client not speaking gRPC.
|
||||
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
|
||||
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: %v", errMsg)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 400,
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.New(codes.Internal, errMsg),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
if !isGRPC || headerError {
|
||||
if protocolError {
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
rstCode: http2.ErrCodeProtocol,
|
||||
onWrite: func() {},
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if !isGRPC {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusUnsupportedMediaType,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
if headerError != nil {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: headerError,
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// "If :authority is missing, Host must be renamed to :authority." - A41
|
||||
@ -485,14 +515,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
} else {
|
||||
s.ctx, s.cancel = context.WithCancel(t.ctx)
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
s.ctx = peer.NewContext(s.ctx, pr)
|
||||
|
||||
// Attach the received metadata to the context.
|
||||
if len(mdata) > 0 {
|
||||
s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
|
||||
@ -507,7 +530,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
t.mu.Unlock()
|
||||
@ -518,13 +541,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if httpMethod != http.MethodPost {
|
||||
t.mu.Unlock()
|
||||
errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: %v", errMsg)
|
||||
errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 405,
|
||||
@ -534,14 +557,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if t.inTapHandle != nil {
|
||||
var err error
|
||||
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
|
||||
t.mu.Unlock()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
|
||||
}
|
||||
stat, ok := status.FromError(err)
|
||||
if !ok {
|
||||
@ -554,7 +577,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
status: stat,
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
t.activeStreams[streamID] = s
|
||||
@ -570,17 +593,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
t.adjustWindow(s, uint32(n))
|
||||
}
|
||||
s.ctx = traceCtx(s.ctx, s.method)
|
||||
if t.stats != nil {
|
||||
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range t.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Header: mdata.Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.ctxDone = s.ctx.Done()
|
||||
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
|
||||
@ -601,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
wq: s.wq,
|
||||
})
|
||||
handle(s)
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleStreams receives incoming streams using the given handler. This is
|
||||
@ -615,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Warningf("Encountered http2.StreamError: %v", se)
|
||||
}
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
@ -634,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
continue
|
||||
}
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
}
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.MetaHeadersFrame:
|
||||
if t.operateHeaders(frame, handle, traceCtx) {
|
||||
t.Close()
|
||||
if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
|
||||
t.Close(err)
|
||||
break
|
||||
}
|
||||
case *http2.DataFrame:
|
||||
@ -662,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
case *http2.GoAwayFrame:
|
||||
// TODO: Handle GoAway from the client appropriately.
|
||||
default:
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received unsupported frame type %T", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -830,7 +850,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
||||
t.controlBuf.executeAndPut(func(any) bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
@ -847,8 +867,8 @@ const (
|
||||
|
||||
func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
if f.IsAck() {
|
||||
if f.Data == goAwayPing.data && t.drainChan != nil {
|
||||
close(t.drainChan)
|
||||
if f.Data == goAwayPing.data && t.drainEvent != nil {
|
||||
t.drainEvent.Fire()
|
||||
return
|
||||
}
|
||||
// Maybe it's a BDP ping.
|
||||
@ -890,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
|
||||
if t.pingStrikes > maxPingStrikes {
|
||||
// Send goaway and close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
|
||||
}
|
||||
}
|
||||
|
||||
@ -917,7 +934,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD)
|
||||
return headerFields
|
||||
}
|
||||
|
||||
func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
func (t *http2Server) checkForHeaderListSize(it any) bool {
|
||||
if t.maxSendHeaderListSize == nil {
|
||||
return true
|
||||
}
|
||||
@ -925,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -945,15 +962,16 @@ func (t *http2Server) streamContextErr(s *Stream) error {
|
||||
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
|
||||
s.hdrMu.Lock()
|
||||
defer s.hdrMu.Unlock()
|
||||
if s.getState() == streamDone {
|
||||
return t.streamContextErr(s)
|
||||
}
|
||||
|
||||
s.hdrMu.Lock()
|
||||
if s.updateHeaderSent() {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
|
||||
if md.Len() > 0 {
|
||||
if s.header.Len() > 0 {
|
||||
s.header = metadata.Join(s.header, md)
|
||||
@ -962,10 +980,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
}
|
||||
}
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return status.Convert(err).Err()
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -996,14 +1012,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
t.closeStream(s, true, http2.ErrCodeInternal, false)
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: Headers are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
sh.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -1013,17 +1029,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
|
||||
// OK is adopted.
|
||||
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
s.hdrMu.Lock()
|
||||
defer s.hdrMu.Unlock()
|
||||
|
||||
if s.getState() == streamDone {
|
||||
return nil
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
|
||||
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
|
||||
// first and create a slice of that exact size.
|
||||
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
|
||||
if !s.updateHeaderSent() { // No headers have been sent.
|
||||
if len(s.header) > 0 { // Send a separate header frame.
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
} else { // Send a trailer only response.
|
||||
@ -1038,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
||||
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
|
||||
} else {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
|
||||
}
|
||||
@ -1052,7 +1070,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
endStream: true,
|
||||
onWrite: t.setResetPingStrikes,
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
|
||||
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
|
||||
if !success {
|
||||
if err != nil {
|
||||
@ -1064,10 +1082,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
// Send a RST_STREAM after the trailers if the client has not already half-closed.
|
||||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
@ -1143,20 +1161,20 @@ func (t *http2Server) keepalive() {
|
||||
if val <= 0 {
|
||||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.Drain()
|
||||
t.Drain("max_idle")
|
||||
return
|
||||
}
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.Drain()
|
||||
t.Drain("max_age")
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing server transport due to maximum connection age")
|
||||
}
|
||||
t.Close()
|
||||
t.controlBuf.put(closeConnection{})
|
||||
case <-t.done:
|
||||
}
|
||||
return
|
||||
@ -1172,10 +1190,7 @@ func (t *http2Server) keepalive() {
|
||||
continue
|
||||
}
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to idleness.")
|
||||
}
|
||||
t.Close()
|
||||
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
|
||||
return
|
||||
}
|
||||
if !outstandingPing {
|
||||
@ -1202,29 +1217,32 @@ func (t *http2Server) keepalive() {
|
||||
// Close starts shutting down the http2Server transport.
|
||||
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
|
||||
// could cause some resource issue. Revisit this later.
|
||||
func (t *http2Server) Close() {
|
||||
func (t *http2Server) Close(err error) {
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
close(t.done)
|
||||
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
|
||||
logger.Infof("transport: error closing conn during Close: %v", err)
|
||||
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
|
||||
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
|
||||
}
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
// Cancel all active streams.
|
||||
for _, s := range streams {
|
||||
s.cancel()
|
||||
}
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
connEnd := &stats.ConnEnd{}
|
||||
t.stats.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1295,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
|
||||
return t.remoteAddr
|
||||
}
|
||||
|
||||
func (t *http2Server) Drain() {
|
||||
func (t *http2Server) Drain(debugData string) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.drainChan != nil {
|
||||
if t.drainEvent != nil {
|
||||
return
|
||||
}
|
||||
t.drainChan = make(chan struct{})
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
|
||||
t.drainEvent = grpcsync.NewEvent()
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
|
||||
}
|
||||
|
||||
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
|
||||
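The Drain/handlePing hunks above swap the one-shot drainChan for a grpcsync.Event. A rough, self-contained sketch of the fire-once behaviour that change relies on (an illustration only, not the vendored grpcsync code):

package main

import (
	"fmt"
	"sync"
)

// event is a minimal stand-in for grpc's internal grpcsync.Event: Fire is
// idempotent, so the drain path can be triggered more than once without the
// double-close panic a bare `close(drainChan)` would risk.
type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event                 { return &event{c: make(chan struct{})} }
func (e *event) Fire()                 { e.once.Do(func() { close(e.c) }) }
func (e *event) Done() <-chan struct{} { return e.c }

func main() {
	drain := newEvent()
	drain.Fire()
	drain.Fire() // no-op on the second call
	<-drain.Done()
	fmt.Println("drain signalled once, observed safely")
}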
@ -1322,19 +1340,17 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
// Stop accepting more streams now.
|
||||
t.state = draining
|
||||
sid := t.maxStreamID
|
||||
retErr := g.closeConn
|
||||
if len(t.activeStreams) == 0 {
|
||||
g.closeConn = true
|
||||
retErr = errors.New("second GOAWAY written and no active streams left to process")
|
||||
}
|
||||
t.mu.Unlock()
|
||||
t.maxStreamMu.Unlock()
|
||||
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if g.closeConn {
|
||||
// Abruptly close the connection following the GoAway (via
|
||||
// loopywriter). But flush out what's inside the buffer first.
|
||||
t.framer.writer.Flush()
|
||||
return false, fmt.Errorf("transport: Connection closing")
|
||||
if retErr != nil {
|
||||
return false, retErr
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@ -1346,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
// originated before the GoAway reaches the client.
|
||||
// After getting the ack or timer expiration send out another GoAway this
|
||||
// time with an ID of the max stream server intends to process.
|
||||
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
|
||||
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
|
||||
@ -1356,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
timer := time.NewTimer(time.Minute)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-t.drainChan:
|
||||
case <-t.drainEvent.Done():
|
||||
case <-timer.C:
|
||||
case <-t.done:
|
||||
return
|
||||
@ -1415,6 +1431,13 @@ func (t *http2Server) getOutFlowWindow() int64 {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) getPeer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
}
|
||||
}
|
||||
|
||||
func getJitter(v time.Duration) time.Duration {
|
||||
if v == infinity {
|
||||
return 0
|
||||
|
119 src/runtime/vendor/google.golang.org/grpc/internal/transport/http_util.go (generated, vendored)
@ -20,8 +20,8 @@ package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@ -30,6 +30,7 @@ import (
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
@ -38,21 +39,14 @@ import (
|
||||
"golang.org/x/net/http2/hpack"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
|
||||
http2MaxFrameLen = 16384 // 16KB frame
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
// https://httpwg.org/specs/rfc7540.html#SettingValues
|
||||
http2InitHeaderTableSize = 4096
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on its own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
|
||||
)
|
||||
|
||||
var (
|
||||
@ -92,7 +86,6 @@ var (
|
||||
// 504 Gateway timeout - UNAVAILABLE.
|
||||
http.StatusGatewayTimeout: codes.Unavailable,
|
||||
}
|
||||
logger = grpclog.Component("transport")
|
||||
)
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
@ -257,13 +250,13 @@ func encodeGrpcMessage(msg string) string {
|
||||
}
|
||||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
var sb strings.Builder
|
||||
for len(msg) > 0 {
|
||||
r, size := utf8.DecodeRuneInString(msg)
|
||||
for _, b := range []byte(string(r)) {
|
||||
if size > 1 {
|
||||
// If size > 1, r is not ascii. Always do percent encoding.
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
fmt.Fprintf(&sb, "%%%02X", b)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -272,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string {
|
||||
//
|
||||
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||
buf.WriteByte(b)
|
||||
sb.WriteByte(b)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
fmt.Fprintf(&sb, "%%%02X", b)
|
||||
}
|
||||
}
|
||||
msg = msg[size:]
|
||||
}
|
||||
return buf.String()
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
|
||||
@ -297,41 +290,45 @@ func decodeGrpcMessage(msg string) string {
|
||||
}
|
||||
|
||||
func decodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
var sb strings.Builder
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c == percentByte && i+2 < lenMsg {
|
||||
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
|
||||
if err != nil {
|
||||
buf.WriteByte(c)
|
||||
sb.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteByte(byte(parsed))
|
||||
sb.WriteByte(byte(parsed))
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
buf.WriteByte(c)
|
||||
sb.WriteByte(c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
type bufWriter struct {
|
||||
pool *sync.Pool
|
||||
buf []byte
|
||||
offset int
|
||||
batchSize int
|
||||
conn net.Conn
|
||||
err error
|
||||
|
||||
onFlush func()
|
||||
}
|
||||
|
||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||
return &bufWriter{
|
||||
buf: make([]byte, batchSize*2),
|
||||
func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
|
||||
w := &bufWriter{
|
||||
batchSize: batchSize,
|
||||
conn: conn,
|
||||
pool: pool,
|
||||
}
|
||||
// this indicates that we should use a non-shared buf
|
||||
if pool == nil {
|
||||
w.buf = make([]byte, batchSize)
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
@ -339,7 +336,12 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
return 0, w.err
|
||||
}
|
||||
if w.batchSize == 0 { // Buffer has been disabled.
|
||||
return w.conn.Write(b)
|
||||
n, err = w.conn.Write(b)
|
||||
return n, toIOError(err)
|
||||
}
|
||||
if w.buf == nil {
|
||||
b := w.pool.Get().(*[]byte)
|
||||
w.buf = *b
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
@ -347,33 +349,64 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
w.offset += nn
|
||||
n += nn
|
||||
if w.offset >= w.batchSize {
|
||||
err = w.Flush()
|
||||
err = w.flushKeepBuffer()
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bufWriter) Flush() error {
|
||||
err := w.flushKeepBuffer()
|
||||
// Only release the buffer if we are in a "shared" mode
|
||||
if w.buf != nil && w.pool != nil {
|
||||
b := w.buf
|
||||
w.pool.Put(&b)
|
||||
w.buf = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *bufWriter) flushKeepBuffer() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if w.offset == 0 {
|
||||
return nil
|
||||
}
|
||||
if w.onFlush != nil {
|
||||
w.onFlush()
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.err = toIOError(w.err)
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
|
||||
|
||||
type ioError struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (i ioError) Unwrap() error {
|
||||
return i.error
|
||||
}
|
||||
|
||||
func isIOError(err error) bool {
|
||||
return errors.As(err, &ioError{})
|
||||
}
|
||||
|
||||
func toIOError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return ioError{error: err}
|
||||
}
|
||||
|
||||
type framer struct {
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
||||
var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
|
||||
var writeBufferMutex sync.Mutex
|
||||
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
|
||||
if writeBufferSize < 0 {
|
||||
writeBufferSize = 0
|
||||
}
|
||||
@ -381,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
|
||||
if readBufferSize > 0 {
|
||||
r = bufio.NewReaderSize(r, readBufferSize)
|
||||
}
|
||||
w := newBufWriter(conn, writeBufferSize)
|
||||
var pool *sync.Pool
|
||||
if sharedWriteBuffer {
|
||||
pool = getWriteBufferPool(writeBufferSize)
|
||||
}
|
||||
w := newBufWriter(conn, writeBufferSize, pool)
|
||||
f := &framer{
|
||||
writer: w,
|
||||
fr: http2.NewFramer(w, r),
|
||||
@ -395,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
|
||||
return f
|
||||
}
|
||||
|
||||
func getWriteBufferPool(writeBufferSize int) *sync.Pool {
|
||||
writeBufferMutex.Lock()
|
||||
defer writeBufferMutex.Unlock()
|
||||
size := writeBufferSize * 2
|
||||
pool, ok := writeBufferPoolMap[size]
|
||||
if ok {
|
||||
return pool
|
||||
}
|
||||
pool = &sync.Pool{
|
||||
New: func() any {
|
||||
b := make([]byte, size)
|
||||
return &b
|
||||
},
|
||||
}
|
||||
writeBufferPoolMap[size] = pool
|
||||
return pool
|
||||
}
|
||||
|
||||
// parseDialTarget returns the network and address to pass to dialer.
|
||||
func parseDialTarget(target string) (string, string) {
|
||||
net := "tcp"
|
||||
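Pieced together from the newBufWriter/getWriteBufferPool hunks above, a compact sketch of how write buffers are pooled per size when the shared-write-buffer option is enabled (illustrative only):

package main

import (
	"fmt"
	"sync"
)

var (
	writeBufferMutex   sync.Mutex
	writeBufferPoolMap = make(map[int]*sync.Pool)
)

// One sync.Pool per buffer size; a framer created with sharedWriteBuffer
// borrows from the pool on Write and returns the buffer on Flush.
func getWriteBufferPool(writeBufferSize int) *sync.Pool {
	writeBufferMutex.Lock()
	defer writeBufferMutex.Unlock()
	size := writeBufferSize * 2 // the framer doubles the configured size
	if pool, ok := writeBufferPoolMap[size]; ok {
		return pool
	}
	pool := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	writeBufferPoolMap[size] = pool
	return pool
}

func main() {
	pool := getWriteBufferPool(32 * 1024)
	buf := pool.Get().(*[]byte) // borrow a buffer
	fmt.Println(len(*buf))      // 65536
	pool.Put(buf)               // release it again, as bufWriter.Flush does
}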
|
40 src/runtime/vendor/google.golang.org/grpc/internal/transport/logging.go (generated, vendored, new file)
@ -0,0 +1,40 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("transport")
|
||||
|
||||
func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
|
||||
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
|
||||
}
|
||||
|
||||
func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
|
||||
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
|
||||
}
|
||||
|
||||
func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
|
||||
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
|
||||
}
|
48 src/runtime/vendor/google.golang.org/grpc/internal/transport/transport.go (generated, vendored)
@ -52,7 +52,7 @@ type bufferPool struct {
|
||||
func newBufferPool() *bufferPool {
|
||||
return &bufferPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
@ -253,6 +253,9 @@ type Stream struct {
|
||||
fc *inFlow
|
||||
wq *writeQuota
|
||||
|
||||
// Holds compressor names passed in grpc-accept-encoding metadata from the
|
||||
// client. This is empty for the client side stream.
|
||||
clientAdvertisedCompressors string
|
||||
// Callback to state application's intentions to read data. This
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
@ -341,8 +344,24 @@ func (s *Stream) RecvCompress() string {
|
||||
}
|
||||
|
||||
// SetSendCompress sets the compression algorithm to the stream.
|
||||
func (s *Stream) SetSendCompress(str string) {
|
||||
s.sendCompress = str
|
||||
func (s *Stream) SetSendCompress(name string) error {
|
||||
if s.isHeaderSent() || s.getState() == streamDone {
|
||||
return errors.New("transport: set send compressor called after headers sent or stream done")
|
||||
}
|
||||
|
||||
s.sendCompress = name
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendCompress returns the send compressor name.
|
||||
func (s *Stream) SendCompress() string {
|
||||
return s.sendCompress
|
||||
}
|
||||
|
||||
// ClientAdvertisedCompressors returns the compressor names advertised by the
|
||||
// client via grpc-accept-encoding header.
|
||||
func (s *Stream) ClientAdvertisedCompressors() string {
|
||||
return s.clientAdvertisedCompressors
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
@ -366,9 +385,11 @@ func (s *Stream) Header() (metadata.MD, error) {
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
s.waitOnHeader()
|
||||
if !s.headerValid {
|
||||
|
||||
if !s.headerValid || s.noHeaders {
|
||||
return nil, s.status.Err()
|
||||
}
|
||||
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
|
||||
@ -523,13 +544,14 @@ type ServerConfig struct {
|
||||
ConnectionTimeout time.Duration
|
||||
Credentials credentials.TransportCredentials
|
||||
InTapHandle tap.ServerInHandle
|
||||
StatsHandler stats.Handler
|
||||
StatsHandlers []stats.Handler
|
||||
KeepaliveParams keepalive.ServerParameters
|
||||
KeepalivePolicy keepalive.EnforcementPolicy
|
||||
InitialWindowSize int32
|
||||
InitialConnWindowSize int32
|
||||
WriteBufferSize int
|
||||
ReadBufferSize int
|
||||
SharedWriteBuffer bool
|
||||
ChannelzParentID *channelz.Identifier
|
||||
MaxHeaderListSize *uint32
|
||||
HeaderTableSize *uint32
|
||||
@ -553,8 +575,8 @@ type ConnectOptions struct {
|
||||
CredsBundle credentials.Bundle
|
||||
// KeepaliveParams stores the keepalive parameters.
|
||||
KeepaliveParams keepalive.ClientParameters
|
||||
// StatsHandler stores the handler for stats.
|
||||
StatsHandler stats.Handler
|
||||
// StatsHandlers stores the handler for stats.
|
||||
StatsHandlers []stats.Handler
|
||||
// InitialWindowSize sets the initial window size for a stream.
|
||||
InitialWindowSize int32
|
||||
// InitialConnWindowSize sets the initial window size for a connection.
|
||||
@ -563,6 +585,8 @@ type ConnectOptions struct {
|
||||
WriteBufferSize int
|
||||
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
||||
ReadBufferSize int
|
||||
// SharedWriteBuffer indicates whether connections should reuse write buffer
|
||||
SharedWriteBuffer bool
|
||||
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
||||
ChannelzParentID *channelz.Identifier
|
||||
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
||||
@ -573,8 +597,8 @@ type ConnectOptions struct {
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
|
||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
@ -691,13 +715,13 @@ type ServerTransport interface {
|
||||
// Close tears down the transport. Once it is called, the transport
|
||||
// should not be accessed any more. All the pending streams and their
|
||||
// handlers will be terminated asynchronously.
|
||||
Close()
|
||||
Close(err error)
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
RemoteAddr() net.Addr
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
Drain(debugData string)
|
||||
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
@ -707,7 +731,7 @@ type ServerTransport interface {
|
||||
}
|
||||
|
||||
// connectionErrorf creates a ConnectionError with the specified error description.
|
||||
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
|
||||
func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
|
||||
return ConnectionError{
|
||||
Desc: fmt.Sprintf(format, a...),
|
||||
temp: temp,
|
||||
|
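The ServerConfig/ConnectOptions hunks above replace the single StatsHandler with a StatsHandlers slice. A hedged sketch of wiring two handlers onto a server, assuming repeated grpc.StatsHandler options accumulate as these plural fields suggest (handler names are made up):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a trivial stats.Handler; two instances are attached below
// to illustrate the fan-out implied by the StatsHandlers slice.
type loggingHandler struct{ name string }

func (h *loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s rpc: %T", h.name, s)
}
func (h *loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("%s conn: %T", h.name, s)
}

func main() {
	srv := grpc.NewServer(
		grpc.StatsHandler(&loggingHandler{name: "metrics"}),
		grpc.StatsHandler(&loggingHandler{name: "tracing"}),
	)
	defer srv.Stop()
	// Register services and call srv.Serve(lis) as usual.
}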
86 src/runtime/vendor/google.golang.org/grpc/metadata/metadata.go (generated, vendored)
@ -41,16 +41,17 @@ type MD map[string][]string
|
||||
// New creates an MD from a given key-value map.
|
||||
//
|
||||
// Only the following ASCII characters are allowed in keys:
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
//
|
||||
// Uppercase letters are automatically converted to lowercase.
|
||||
//
|
||||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||||
// result in errors if set in metadata.
|
||||
func New(m map[string]string) MD {
|
||||
md := MD{}
|
||||
md := make(MD, len(m))
|
||||
for k, val := range m {
|
||||
key := strings.ToLower(k)
|
||||
md[key] = append(md[key], val)
|
||||
@ -62,10 +63,11 @@ func New(m map[string]string) MD {
|
||||
// Pairs panics if len(kv) is odd.
|
||||
//
|
||||
// Only the following ASCII characters are allowed in keys:
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
// - digits: 0-9
|
||||
// - uppercase letters: A-Z (normalized to lower)
|
||||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
//
|
||||
// Uppercase letters are automatically converted to lowercase.
|
||||
//
|
||||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||||
@ -74,7 +76,7 @@ func Pairs(kv ...string) MD {
|
||||
if len(kv)%2 == 1 {
|
||||
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
||||
}
|
||||
md := MD{}
|
||||
md := make(MD, len(kv)/2)
|
||||
for i := 0; i < len(kv); i += 2 {
|
||||
key := strings.ToLower(kv[i])
|
||||
md[key] = append(md[key], kv[i+1])
|
||||
@ -89,7 +91,11 @@ func (md MD) Len() int {
|
||||
|
||||
// Copy returns a copy of md.
|
||||
func (md MD) Copy() MD {
|
||||
return Join(md)
|
||||
out := make(MD, len(md))
|
||||
for k, v := range md {
|
||||
out[k] = copyOf(v)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Get obtains the values for a given key.
|
||||
@ -169,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
|
||||
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
added := make([][]string, len(md.added)+1)
|
||||
copy(added, md.added)
|
||||
added[len(added)-1] = make([]string, len(kv))
|
||||
copy(added[len(added)-1], kv)
|
||||
kvCopy := make([]string, 0, len(kv))
|
||||
for i := 0; i < len(kv); i += 2 {
|
||||
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
||||
}
|
||||
added[len(added)-1] = kvCopy
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
|
||||
}
|
||||
|
||||
@ -182,19 +191,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
out := MD{}
|
||||
out := make(MD, len(md))
|
||||
for k, v := range md {
|
||||
// We need to manually convert all keys to lower case, because MD is a
|
||||
// map, and there's no guarantee that the MD attached to the context is
|
||||
// created using our helper functions.
|
||||
key := strings.ToLower(k)
|
||||
s := make([]string, len(v))
|
||||
copy(s, v)
|
||||
out[key] = s
|
||||
out[key] = copyOf(v)
|
||||
}
|
||||
return out, true
|
||||
}
|
||||
|
||||
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
||||
// key from the incoming metadata if it exists. Key must be lower-case.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func ValueFromIncomingContext(ctx context.Context, key string) []string {
|
||||
md, ok := ctx.Value(mdIncomingKey{}).(MD)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if v, ok := md[key]; ok {
|
||||
return copyOf(v)
|
||||
}
|
||||
for k, v := range md {
|
||||
// We need to manually convert all keys to lower case, because MD is a
|
||||
// map, and there's no guarantee that the MD attached to the context is
|
||||
// created using our helper functions.
|
||||
if strings.ToLower(k) == key {
|
||||
return copyOf(v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// the returned slice must not be modified in place
|
||||
func copyOf(v []string) []string {
|
||||
vals := make([]string, len(v))
|
||||
copy(vals, v)
|
||||
return vals
|
||||
}
|
||||
|
||||
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
|
||||
//
|
||||
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
|
||||
@ -222,15 +263,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
out := MD{}
|
||||
mdSize := len(raw.md)
|
||||
for i := range raw.added {
|
||||
mdSize += len(raw.added[i]) / 2
|
||||
}
|
||||
|
||||
out := make(MD, mdSize)
|
||||
for k, v := range raw.md {
|
||||
// We need to manually convert all keys to lower case, because MD is a
|
||||
// map, and there's no guarantee that the MD attached to the context is
|
||||
// created using our helper functions.
|
||||
key := strings.ToLower(k)
|
||||
s := make([]string, len(v))
|
||||
copy(s, v)
|
||||
out[key] = s
|
||||
out[key] = copyOf(v)
|
||||
}
|
||||
for _, added := range raw.added {
|
||||
if len(added)%2 == 1 {
|
||||
|
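A short usage sketch for the ValueFromIncomingContext helper added above (experimental API in this grpc version; the key and value are made-up examples):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Simulate the incoming metadata a server-side handler would see.
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("x-request-id", "abc-123"))

	// Look up a single lower-case key without copying the whole MD map.
	// The returned slice must not be modified in place.
	if vals := metadata.ValueFromIncomingContext(ctx, "x-request-id"); len(vals) > 0 {
		fmt.Println(vals[0]) // abc-123
	}
}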
101 src/runtime/vendor/google.golang.org/grpc/picker_wrapper.go (generated, vendored)
@ -26,27 +26,38 @@ import (
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
istatus "google.golang.org/grpc/internal/status"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
// actions and unblock when there's a picker update.
|
||||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
idle bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
statsHandlers []stats.Handler // to record blocking picker calls
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
return &pickerWrapper{blockingCh: make(chan struct{})}
|
||||
func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
|
||||
return &pickerWrapper{
|
||||
blockingCh: make(chan struct{}),
|
||||
statsHandlers: statsHandlers,
|
||||
}
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
if pw.done || pw.idle {
|
||||
// There is a small window where a picker update from the LB policy can
|
||||
// race with the channel going to idle mode. If the picker is idle here,
|
||||
// it is because the channel asked it to do so, and therefore it is safe
|
||||
// to ignore the update from the LB policy.
|
||||
pw.mu.Unlock()
|
||||
return
|
||||
}
|
||||
@ -57,12 +68,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
acw.mu.Lock()
|
||||
ac := acw.ac
|
||||
acw.mu.Unlock()
|
||||
// doneChannelzWrapper performs the following:
|
||||
// - increments the calls started channelz counter
|
||||
// - wraps the done function in the passed in result to increment the calls
|
||||
// failed or calls succeeded channelz counter before invoking the actual
|
||||
// done function.
|
||||
func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
|
||||
ac := acbw.ac
|
||||
ac.incrCallsStarted()
|
||||
return func(b balancer.DoneInfo) {
|
||||
done := result.Done
|
||||
result.Done = func(b balancer.DoneInfo) {
|
||||
if b.Err != nil && b.Err != io.EOF {
|
||||
ac.incrCallsFailed()
|
||||
} else {
|
||||
@ -81,15 +96,16 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
||||
// - the current picker returns other errors and failfast is false.
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
|
||||
var ch chan struct{}
|
||||
|
||||
var lastPickErr error
|
||||
|
||||
for {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return nil, nil, ErrClientConnClosing
|
||||
return nil, balancer.PickResult{}, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if pw.picker == nil {
|
||||
@ -110,28 +126,45 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
}
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
||||
return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, errStr)
|
||||
return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
|
||||
}
|
||||
case <-ch:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// If the channel is set, it means that the pick call had to wait for a
|
||||
// new picker at some point. Either it's the first iteration and this
|
||||
// function received the first picker, or a picker errored with
|
||||
// ErrNoSubConnAvailable or errored with failfast set to false, which
|
||||
// will trigger a continue to the next iteration. In the first case this
|
||||
// conditional will hit if this call had to block (the channel is set).
|
||||
// In the second case, the only way it will get to this conditional is
|
||||
// if there is a new picker.
|
||||
if ch != nil {
|
||||
for _, sh := range pw.statsHandlers {
|
||||
sh.HandleRPC(ctx, &stats.PickerUpdated{})
|
||||
}
|
||||
}
|
||||
|
||||
ch = pw.blockingCh
|
||||
p := pw.picker
|
||||
pw.mu.Unlock()
|
||||
|
||||
pickResult, err := p.Pick(info)
|
||||
|
||||
if err != nil {
|
||||
if err == balancer.ErrNoSubConnAvailable {
|
||||
continue
|
||||
}
|
||||
if _, ok := status.FromError(err); ok {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Status error: end the RPC unconditionally with this status.
|
||||
return nil, nil, dropError{error: err}
|
||||
// First restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
|
||||
}
|
||||
return nil, balancer.PickResult{}, dropError{error: err}
|
||||
}
|
||||
// For all other errors, wait for ready RPCs should block and other
|
||||
// RPCs should fail with unavailable.
|
||||
@ -139,19 +172,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
lastPickErr = err
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Error(codes.Unavailable, err.Error())
|
||||
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
|
||||
}
|
||||
|
||||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
|
||||
continue
|
||||
}
|
||||
if t := acw.getAddrConn().getReadyTransport(); t != nil {
|
||||
if t := acbw.ac.getReadyTransport(); t != nil {
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
||||
doneChannelzWrapper(acbw, &pickResult)
|
||||
return t, pickResult, nil
|
||||
}
|
||||
return t, pickResult.Done, nil
|
||||
return t, pickResult, nil
|
||||
}
|
||||
if pickResult.Done != nil {
|
||||
// Calling done with nil error, no bytes sent and no bytes received.
|
||||
@ -176,6 +210,25 @@ func (pw *pickerWrapper) close() {
|
||||
close(pw.blockingCh)
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) enterIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.idle = true
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) exitIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.idle = false
|
||||
}
|
||||
|
||||
// dropError is a wrapper error that indicates the LB policy wishes to drop the
|
||||
// RPC and not retry it.
|
||||
type dropError struct {
|
||||
|
120 src/runtime/vendor/google.golang.org/grpc/pickfirst.go (generated, vendored)
@ -19,15 +19,25 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
const PickFirstBalancerName = "pick_first"
|
||||
const (
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
PickFirstBalancerName = "pick_first"
|
||||
logPrefix = "[pick-first-lb %p] "
|
||||
)
|
||||
|
||||
func newPickfirstBuilder() balancer.Builder {
|
||||
return &pickfirstBuilder{}
|
||||
@ -36,22 +46,55 @@ func newPickfirstBuilder() balancer.Builder {
|
||||
type pickfirstBuilder struct{}
|
||||
|
||||
func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &pickfirstBalancer{cc: cc}
|
||||
b := &pickfirstBalancer{cc: cc}
|
||||
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
|
||||
return b
|
||||
}
|
||||
|
||||
func (*pickfirstBuilder) Name() string {
|
||||
return PickFirstBalancerName
|
||||
}
|
||||
|
||||
type pfConfig struct {
|
||||
serviceconfig.LoadBalancingConfig `json:"-"`
|
||||
|
||||
// If set to true, instructs the LB policy to shuffle the order of the list
|
||||
// of addresses received from the name resolver before attempting to
|
||||
// connect to them.
|
||||
ShuffleAddressList bool `json:"shuffleAddressList"`
|
||||
}
|
||||
|
||||
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
||||
if !envconfig.PickFirstLBConfig {
|
||||
// Prior to supporting loadbalancing configuration, the pick_first LB
|
||||
// policy did not implement the balancer.ConfigParser interface. This
|
||||
// meant that if a non-empty configuration was passed to it, the service
|
||||
// config unmarshaling code would throw a warning log, but would
|
||||
// continue using the pick_first LB policy. The code below ensures the
|
||||
// same behavior is retained if the env var is not set.
|
||||
if string(js) != "{}" {
|
||||
logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var cfg pfConfig
|
||||
if err := json.Unmarshal(js, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
logger *internalgrpclog.PrefixLogger
|
||||
state connectivity.State
|
||||
cc balancer.ClientConn
|
||||
subConn balancer.SubConn
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||
if b.logger.V(2) {
|
||||
b.logger.Infof("Received error from the name resolver: %v", err)
|
||||
}
|
||||
if b.subConn == nil {
|
||||
b.state = connectivity.TransientFailure
|
||||
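A sketch of the service config that exercises the pfConfig parsed above; the dial target is hypothetical, and the option is only honored when the pick_first LB-config flag (envconfig.PickFirstLBConfig in the hunk above) is enabled:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// shuffleAddressList maps onto pfConfig.ShuffleAddressList.
	conn, err := grpc.Dial("dns:///backend.example.invalid:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(
			`{"loadBalancingConfig":[{"pick_first":{"shuffleAddressList":true}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}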
@ -69,28 +112,49 @@ func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
|
||||
if len(state.ResolverState.Addresses) == 0 {
|
||||
addrs := state.ResolverState.Addresses
|
||||
if len(addrs) == 0 {
|
||||
// The resolver reported an empty address list. Treat it like an error by
|
||||
// calling b.ResolverError.
|
||||
if b.subConn != nil {
|
||||
// Remove the old subConn. All addresses were removed, so it is no longer
|
||||
// valid.
|
||||
b.cc.RemoveSubConn(b.subConn)
|
||||
// Shut down the old subConn. All addresses were removed, so it is
|
||||
// no longer valid.
|
||||
b.subConn.Shutdown()
|
||||
b.subConn = nil
|
||||
}
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
// We don't have to guard this block with the env var because ParseConfig
|
||||
// already does so.
|
||||
cfg, ok := state.BalancerConfig.(pfConfig)
|
||||
if state.BalancerConfig != nil && !ok {
|
||||
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
|
||||
}
|
||||
if cfg.ShuffleAddressList {
|
||||
addrs = append([]resolver.Address{}, addrs...)
|
||||
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
|
||||
}
|
||||
|
||||
if b.logger.V(2) {
|
||||
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
|
||||
}
|
||||
|
||||
if b.subConn != nil {
|
||||
b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
|
||||
b.cc.UpdateAddresses(b.subConn, addrs)
|
||||
return nil
|
||||
}
|
||||
|
||||
subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
||||
var subConn balancer.SubConn
|
||||
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
|
||||
StateListener: func(state balancer.SubConnState) {
|
||||
b.updateSubConnState(subConn, state)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
if logger.V(2) {
|
||||
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
if b.logger.V(2) {
|
||||
b.logger.Infof("Failed to create new SubConn: %v", err)
|
||||
}
|
||||
b.state = connectivity.TransientFailure
|
||||
b.cc.UpdateState(balancer.State{
|
||||
@ -102,24 +166,29 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
||||
b.subConn = subConn
|
||||
b.state = connectivity.Idle
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: connectivity.Idle,
|
||||
Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
|
||||
ConnectivityState: connectivity.Connecting,
|
||||
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||||
})
|
||||
b.subConn.Connect()
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateSubConnState is unused as a StateListener is always registered when
|
||||
// creating SubConns.
|
||||
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
|
||||
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
|
||||
if b.logger.V(2) {
|
||||
b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
|
||||
}
|
||||
if b.subConn != subConn {
|
||||
if logger.V(2) {
|
||||
logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
|
||||
if b.logger.V(2) {
|
||||
b.logger.Infof("Ignored state change because subConn is not recognized")
|
||||
}
|
||||
return
|
||||
}
|
||||
b.state = state.ConnectivityState
|
||||
if state.ConnectivityState == connectivity.Shutdown {
|
||||
b.subConn = nil
|
||||
return
|
||||
@ -132,11 +201,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
||||
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
|
||||
})
|
||||
case connectivity.Connecting:
|
||||
if b.state == connectivity.TransientFailure {
|
||||
// We stay in TransientFailure until we are Ready. See A62.
|
||||
return
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: state.ConnectivityState,
|
||||
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||||
})
|
||||
case connectivity.Idle:
|
||||
if b.state == connectivity.TransientFailure {
|
||||
// We stay in TransientFailure until we are Ready. Also kick the
|
||||
// subConn out of Idle into Connecting. See A62.
|
||||
b.subConn.Connect()
|
||||
return
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: state.ConnectivityState,
|
||||
Picker: &idlePicker{subConn: subConn},
|
||||
@ -147,6 +226,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
||||
Picker: &picker{err: state.ConnectionError},
|
||||
})
|
||||
}
|
||||
b.state = state.ConnectivityState
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) Close() {
|
||||
|
4 src/runtime/vendor/google.golang.org/grpc/preloader.go (generated, vendored)
@ -25,7 +25,7 @@ import (
|
||||
|
||||
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -37,7 +37,7 @@ type PreparedMsg struct {
|
||||
}
|
||||
|
||||
// Encode marshalls and compresses the message using the codec and compressor for the stream.
|
||||
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
|
||||
func (p *PreparedMsg) Encode(s Stream, msg any) error {
|
||||
ctx := s.Context()
|
||||
rpcInfo, ok := rpcInfoFromContext(ctx)
|
||||
if !ok {
|
||||
|
14 src/runtime/vendor/google.golang.org/grpc/regenerate.sh (generated, vendored)
@ -57,7 +57,8 @@ LEGACY_SOURCES=(
|
||||
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
|
||||
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
|
||||
profiling/proto/service.proto
|
||||
reflection/grpc_reflection_v1alpha/reflection.proto
|
||||
${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
|
||||
${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
|
||||
)
|
||||
|
||||
# Generates only the new gRPC Service symbols
|
||||
@ -68,7 +69,6 @@ SOURCES=(
|
||||
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
|
||||
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
|
||||
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
|
||||
${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
|
||||
${WORKDIR}/grpc-proto/grpc/testing/*.proto
|
||||
${WORKDIR}/grpc-proto/grpc/core/*.proto
|
||||
)
|
||||
@ -80,8 +80,7 @@ SOURCES=(
|
||||
# Note that the protos listed here are all for testing purposes. All protos to
|
||||
# be used externally should have a go_package option (and they don't need to be
|
||||
# listed here).
|
||||
OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
|
||||
Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
|
||||
OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
|
||||
Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
|
||||
Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
|
||||
Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
|
||||
@ -121,11 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
|
||||
# see grpc_testing_not_regenerate/README.md for details.
|
||||
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
|
||||
|
||||
# grpc/service_config/service_config.proto does not have a go_package option.
|
||||
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
|
||||
|
||||
# grpc/testing does not have a go_package option.
|
||||
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
|
||||
mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
|
||||
|
||||
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
|
||||
|
61 src/runtime/vendor/google.golang.org/grpc/resolver/map.go (generated, vendored)
@ -20,7 +20,7 @@ package resolver
|
||||
|
||||
type addressMapEntry struct {
|
||||
addr Address
|
||||
value interface{}
|
||||
value any
|
||||
}
|
||||
|
||||
// AddressMap is a map of addresses to arbitrary values taking into account
|
||||
@ -28,25 +28,40 @@ type addressMapEntry struct {
|
||||
// Multiple accesses may not be performed concurrently. Must be created via
|
||||
// NewAddressMap; do not construct directly.
|
||||
type AddressMap struct {
|
||||
m map[string]addressMapEntryList
|
||||
// The underlying map is keyed by an Address with fields that we don't care
|
||||
// about being set to their zero values. The only fields that we care about
|
||||
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
|
||||
// distinguish between addresses with same `Addr` and `ServerName`, but
|
||||
// different `Attributes`, we cannot store the `Attributes` in the map key.
|
||||
//
|
||||
// The comparison operation for structs works as follows:
|
||||
// Struct values are comparable if all their fields are comparable. Two
|
||||
// struct values are equal if their corresponding non-blank fields are equal.
|
||||
//
|
||||
// The value type of the map contains a slice of addresses which match the key
|
||||
// in their `Addr` and `ServerName` fields and contain the corresponding value
|
||||
// associated with them.
|
||||
m map[Address]addressMapEntryList
|
||||
}
|
||||
|
||||
func toMapKey(addr *Address) Address {
|
||||
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
|
||||
}
|
||||
|
||||
type addressMapEntryList []*addressMapEntry
|
||||
|
||||
// NewAddressMap creates a new AddressMap.
|
||||
func NewAddressMap() *AddressMap {
|
||||
return &AddressMap{m: make(map[string]addressMapEntryList)}
|
||||
return &AddressMap{m: make(map[Address]addressMapEntryList)}
|
||||
}
|
||||
|
||||
// find returns the index of addr in the addressMapEntry slice, or -1 if not
|
||||
// present.
|
||||
func (l addressMapEntryList) find(addr Address) int {
|
||||
if len(l) == 0 {
|
||||
return -1
|
||||
}
|
||||
for i, entry := range l {
|
||||
if entry.addr.ServerName == addr.ServerName &&
|
||||
entry.addr.Attributes.Equal(addr.Attributes) {
|
||||
// Attributes are the only thing to match on here, since `Addr` and
|
||||
// `ServerName` are already equal.
|
||||
if entry.addr.Attributes.Equal(addr.Attributes) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
@ -54,8 +69,9 @@ func (l addressMapEntryList) find(addr Address) int {
|
||||
}
|
||||
|
||||
// Get returns the value for the address in the map, if present.
|
||||
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
||||
entryList := a.m[addr.Addr]
|
||||
func (a *AddressMap) Get(addr Address) (value any, ok bool) {
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
if entry := entryList.find(addr); entry != -1 {
|
||||
return entryList[entry].value, true
|
||||
}
|
||||
@ -63,18 +79,20 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
||||
}
|
||||
|
||||
// Set updates or adds the value to the address in the map.
|
||||
func (a *AddressMap) Set(addr Address, value interface{}) {
|
||||
entryList := a.m[addr.Addr]
|
||||
func (a *AddressMap) Set(addr Address, value any) {
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
if entry := entryList.find(addr); entry != -1 {
|
||||
a.m[addr.Addr][entry].value = value
|
||||
entryList[entry].value = value
|
||||
return
|
||||
}
|
||||
a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value})
|
||||
a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
|
||||
}
|
||||
|
||||
// Delete removes addr from the map.
|
||||
func (a *AddressMap) Delete(addr Address) {
|
||||
entryList := a.m[addr.Addr]
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
entry := entryList.find(addr)
|
||||
if entry == -1 {
|
||||
return
|
||||
@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) {
|
||||
copy(entryList[entry:], entryList[entry+1:])
|
||||
entryList = entryList[:len(entryList)-1]
|
||||
}
|
||||
a.m[addr.Addr] = entryList
|
||||
a.m[addrKey] = entryList
|
||||
}
|
||||
|
||||
// Len returns the number of entries in the map.
|
||||
@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address {
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Values returns a slice of all current map values.
|
||||
func (a *AddressMap) Values() []any {
|
||||
ret := make([]any, 0, a.Len())
|
||||
for _, entryList := range a.m {
|
||||
for _, entry := range entryList {
|
||||
ret = append(ret, entry.value)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
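A small sketch of the reworked AddressMap keying shown above: entries that share Addr and ServerName but differ in Attributes stay distinct (the attribute values here are invented for illustration):

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	// Same Addr/ServerName, different Attributes: the Address-derived map key
	// plus the Attributes.Equal check in find() keeps them as separate entries.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}

	m.Set(a1, "subconn-1")
	m.Set(a2, "subconn-2")

	v, ok := m.Get(a2)
	fmt.Println(m.Len(), v, ok) // 2 subconn-2 true
}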
144 src/runtime/vendor/google.golang.org/grpc/resolver/resolver.go (generated, vendored)
@ -22,12 +22,13 @@ package resolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
@ -40,8 +41,9 @@ var (
|
||||
|
||||
// TODO(bar) install dns resolver in init(){}.
|
||||
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will be
|
||||
// used as the scheme registered with this builder.
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will
|
||||
// be used as the scheme registered with this builder. The registry is case
|
||||
// sensitive, and schemes should not contain any uppercase characters.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Resolvers are
|
||||
@ -75,28 +77,9 @@ func GetDefaultScheme() string {
|
||||
return defaultScheme
|
||||
}
|
||||
|
||||
// AddressType indicates the address type returned by name resolution.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
type AddressType uint8
|
||||
|
||||
const (
|
||||
// Backend indicates the address is for a backend server.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
Backend AddressType = iota
|
||||
// GRPCLB indicates the address is for a grpclb load balancer.
|
||||
//
|
||||
// Deprecated: to select the GRPCLB load balancing policy, use a service
|
||||
// config with a corresponding loadBalancingConfig. To supply balancer
|
||||
// addresses to the GRPCLB load balancing policy, set State.Attributes
|
||||
// using balancer/grpclb/state.Set.
|
||||
GRPCLB
|
||||
)
|
||||
|
||||
// Address represents a server the client connects to.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -109,9 +92,6 @@ type Address struct {
|
||||
// the address, instead of the hostname from the Dial target string. In most cases,
|
||||
// this should not be set.
|
||||
//
|
||||
// If Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// balancer, not the name of the backend.
|
||||
//
|
||||
// WARNING: ServerName must only be populated with trusted values. It
|
||||
// is insecure to populate it with data from untrusted inputs since untrusted
|
||||
// values could be used to bypass the authority checks performed by TLS.
|
||||
@ -122,34 +102,46 @@ type Address struct {
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
// BalancerAttributes contains arbitrary data about this address intended
|
||||
// for consumption by the LB policy. These attribes do not affect SubConn
|
||||
// for consumption by the LB policy. These attributes do not affect SubConn
|
||||
// creation, connection establishment, handshaking, etc.
|
||||
BalancerAttributes *attributes.Attributes
|
||||
|
||||
// Type is the type of this address.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Type AddressType
|
||||
// Deprecated: when an Address is inside an Endpoint, this field should not
|
||||
// be used, and it will eventually be removed entirely.
|
||||
BalancerAttributes *attributes.Attributes
|
||||
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Metadata interface{}
|
||||
Metadata any
|
||||
}
|
||||
|
||||
// Equal returns whether a and o are identical. Metadata is compared directly,
|
||||
// not with any recursive introspection.
|
||||
//
|
||||
// This method compares all fields of the address. When used to tell apart
|
||||
// addresses during subchannel creation or connection establishment, it might be
|
||||
// more appropriate for the caller to implement custom equality logic.
|
||||
func (a Address) Equal(o Address) bool {
|
||||
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
|
||||
a.Attributes.Equal(o.Attributes) &&
|
||||
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
|
||||
a.Type == o.Type && a.Metadata == o.Metadata
|
||||
a.Metadata == o.Metadata
|
||||
}
|
||||
|
||||
// String returns JSON formatted string representation of the address.
|
||||
func (a Address) String() string {
|
||||
return pretty.ToJSON(a)
|
||||
var sb strings.Builder
|
||||
sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
|
||||
sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
|
||||
if a.Attributes != nil {
|
||||
sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
|
||||
}
|
||||
if a.BalancerAttributes != nil {
|
||||
sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// BuildOptions includes additional information for the builder to create
|
||||
@ -178,11 +170,37 @@ type BuildOptions struct {
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// An Endpoint is one network endpoint, or server, which may have multiple
|
||||
// addresses with which it can be accessed.
|
||||
type Endpoint struct {
|
||||
// Addresses contains a list of addresses used to access this endpoint.
|
||||
Addresses []Address
|
||||
|
||||
// Attributes contains arbitrary data about this endpoint intended for
|
||||
// consumption by the LB policy.
|
||||
Attributes *attributes.Attributes
|
||||
}
|
||||
|
||||
// State contains the current Resolver state relevant to the ClientConn.
|
||||
type State struct {
|
||||
// Addresses is the latest set of resolved addresses for the target.
|
||||
//
|
||||
// If a resolver sets Addresses but does not set Endpoints, one Endpoint
|
||||
// will be created for each Address before the State is passed to the LB
|
||||
// policy. The BalancerAttributes of each entry in Addresses will be set
|
||||
// in Endpoints.Attributes, and be cleared in the Endpoint's Address's
|
||||
// BalancerAttributes.
|
||||
//
|
||||
// Soon, Addresses will be deprecated and replaced fully by Endpoints.
|
||||
Addresses []Address
|
||||
|
||||
// Endpoints is the latest set of resolved endpoints for the target.
|
||||
//
|
||||
// If a resolver produces a State containing Endpoints but not Addresses,
|
||||
// it must take care to ensure the LB policies it selects will support
|
||||
// Endpoints.
|
||||
Endpoints []Endpoint
|
||||
|
||||
// ServiceConfig contains the result from parsing the latest service
|
||||
// config. If it is nil, it indicates no service config is present or the
|
||||
// resolver does not provide service configs.
|
||||
@ -202,6 +220,15 @@ type State struct {
|
||||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// UpdateState updates the state of the ClientConn appropriately.
|
||||
//
|
||||
// If an error is returned, the resolver should try to resolve the
|
||||
// target again. The resolver should use a backoff timer to prevent
|
||||
// overloading the server with requests. If a resolver is certain that
|
||||
// reresolving will not change the result, e.g. because it is
|
||||
// a watch-based resolver, returned errors can be ignored.
|
||||
//
|
||||
// If the resolved State is the same as the last reported one, calling
|
||||
// UpdateState can be omitted.
|
||||
UpdateState(State) error
|
||||
// ReportError notifies the ClientConn that the Resolver encountered an
|
||||
// error. The ClientConn will notify the load balancer and begin calling
|
||||
@ -233,23 +260,7 @@ type ClientConn interface {
|
||||
// target does not contain a scheme or if the parsed scheme is not registered
|
||||
// (i.e. no corresponding resolver available to resolve the endpoint), we will
|
||||
// apply the default scheme, and will attempt to reparse it.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// - "dns://some_authority/foo.bar"
|
||||
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
||||
// - "foo.bar"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
||||
// - "unknown_scheme://authority/endpoint"
|
||||
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
||||
type Target struct {
|
||||
// Deprecated: use URL.Scheme instead.
|
||||
Scheme string
|
||||
// Deprecated: use URL.Host instead.
|
||||
Authority string
|
||||
// Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
|
||||
// the former is empty.
|
||||
Endpoint string
|
||||
// URL contains the parsed dial target with an optional default scheme added
|
||||
// to it if the original dial target contained no scheme or contained an
|
||||
// unregistered scheme. Any query params specified in the original dial
|
||||
@ -257,6 +268,24 @@ type Target struct {
|
||||
URL url.URL
|
||||
}
|
||||
|
||||
// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
|
||||
// or `URL.Opaque`. The latter is used when the former is empty.
|
||||
func (t Target) Endpoint() string {
|
||||
endpoint := t.URL.Path
|
||||
if endpoint == "" {
|
||||
endpoint = t.URL.Opaque
|
||||
}
|
||||
// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
||||
// value returned from url.Parse() contains a leading "/". Although this is
|
||||
// in accordance with RFC 3986, we do not want to break existing resolver
|
||||
// implementations which expect the endpoint without the leading "/". So, we
|
||||
// end up stripping the leading "/" here. But this will result in an
|
||||
// incorrect parsing for something like "unix:///path/to/socket". Since we
|
||||
// own the "unix" resolver, we can workaround in the unix resolver by using
|
||||
// the `URL` field.
|
||||
return strings.TrimPrefix(endpoint, "/")
|
||||
}
|
||||
|
||||
// Builder creates a resolver that will be used to watch name resolution updates.
|
||||
type Builder interface {
|
||||
// Build creates a new resolver for the given target.
|
||||
@ -264,8 +293,10 @@ type Builder interface {
|
||||
// gRPC dial calls Build synchronously, and fails if the returned error is
|
||||
// not nil.
|
||||
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
|
||||
// Scheme returns the scheme supported by this resolver.
|
||||
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
// Scheme returns the scheme supported by this resolver. Scheme is defined
|
||||
// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
|
||||
// string should not contain uppercase characters, as they will not match
|
||||
// the parsed target's scheme as defined in RFC 3986.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
@ -283,10 +314,3 @@ type Resolver interface {
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
}
|
||||
|
||||
// UnregisterForTesting removes the resolver builder with the given scheme from the
|
||||
// resolver map.
|
||||
// This function is for testing only.
|
||||
func UnregisterForTesting(scheme string) {
|
||||
delete(m, scheme)
|
||||
}
|
||||
|
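For illustration only (this snippet is not part of the vendored diff; it assumes the grpc v1.58 resolver package vendored here), a minimal sketch of what the new Target.Endpoint() accessor returns for a typical dial target, now that the deprecated Scheme/Authority/Endpoint fields are gone:

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func main() {
	// url.Parse("dns://some_authority/foo.bar") yields Path "/foo.bar";
	// Target.Endpoint() strips the leading "/" for resolver implementations.
	u, err := url.Parse("dns://some_authority/foo.bar")
	if err != nil {
		panic(err)
	}
	t := resolver.Target{URL: *u}
	fmt.Println(t.Endpoint()) // prints "foo.bar"
}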
233 src/runtime/vendor/google.golang.org/grpc/resolver_conn_wrapper.go generated vendored
@ -19,11 +19,11 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
@ -31,129 +31,200 @@ import (
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// resolverStateUpdater wraps the single method used by ccResolverWrapper to
|
||||
// report a state update from the actual resolver implementation.
|
||||
type resolverStateUpdater interface {
|
||||
updateResolverState(s resolver.State, err error) error
|
||||
}
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
cc *ClientConn
|
||||
resolverMu sync.Mutex
|
||||
resolver resolver.Resolver
|
||||
done *grpcsync.Event
|
||||
curState resolver.State
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc resolverStateUpdater
|
||||
channelzID *channelz.Identifier
|
||||
ignoreServiceConfig bool
|
||||
opts ccResolverWrapperOpts
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
|
||||
serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
|
||||
|
||||
incomingMu sync.Mutex // Synchronizes all the incoming calls.
|
||||
// All incoming (resolver --> gRPC) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled on the serializer.
|
||||
// Fields accessed *only* in these serializer callbacks, can therefore be
|
||||
// accessed without a mutex.
|
||||
curState resolver.State
|
||||
|
||||
// mu guards access to the below fields.
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
resolver resolver.Resolver // Accessed only from outgoing calls.
|
||||
}
|
||||
|
||||
// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
|
||||
// ccResolverWrapper.
|
||||
type ccResolverWrapperOpts struct {
|
||||
target resolver.Target // User specified dial target to resolve.
|
||||
builder resolver.Builder // Resolver builder to use.
|
||||
bOpts resolver.BuildOptions // Resolver build options to use.
|
||||
channelzID *channelz.Identifier // Channelz identifier for the channel.
|
||||
}
|
||||
|
||||
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
|
||||
// returns a ccResolverWrapper object which wraps the newly built resolver.
|
||||
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
|
||||
func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
done: grpcsync.NewEvent(),
|
||||
cc: cc,
|
||||
channelzID: opts.channelzID,
|
||||
ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
|
||||
opts: opts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
}
|
||||
rbo := resolver.BuildOptions{
|
||||
DisableServiceConfig: cc.dopts.disableServiceConfig,
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
}
|
||||
|
||||
var err error
|
||||
// We need to hold the lock here while we assign to the ccr.resolver field
|
||||
// to guard against a data race caused by the following code path,
|
||||
// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
|
||||
// accessing ccr.resolver which is being assigned here.
|
||||
ccr.resolverMu.Lock()
|
||||
defer ccr.resolverMu.Unlock()
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
|
||||
// Cannot hold the lock at build time because the resolver can send an
|
||||
// update or error inline and these incoming calls grab the lock to schedule
|
||||
// a callback in the serializer.
|
||||
r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Any error reported by the resolver at build time that leads to a
|
||||
// re-resolution request from the balancer is dropped by grpc until we
|
||||
// return from this function. So, we don't have to handle pending resolveNow
|
||||
// requests here.
|
||||
ccr.mu.Lock()
|
||||
ccr.resolver = r
|
||||
ccr.mu.Unlock()
|
||||
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.resolverMu.Lock()
|
||||
if !ccr.done.HasFired() {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
ccr.mu.Lock()
|
||||
defer ccr.mu.Unlock()
|
||||
|
||||
// ccr.resolver field is set only after the call to Build() returns. But in
|
||||
// the process of building, the resolver may send an error update which when
|
||||
// propagated to the balancer may result in a re-resolution request.
|
||||
if ccr.closed || ccr.resolver == nil {
|
||||
return
|
||||
}
|
||||
ccr.resolverMu.Unlock()
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolverMu.Lock()
|
||||
ccr.resolver.Close()
|
||||
ccr.done.Fire()
|
||||
ccr.resolverMu.Unlock()
|
||||
ccr.mu.Lock()
|
||||
if ccr.closed {
|
||||
ccr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
|
||||
|
||||
// Close the serializer to ensure that no more calls from the resolver are
|
||||
// handled, before actually closing the resolver.
|
||||
ccr.serializerCancel()
|
||||
ccr.closed = true
|
||||
r := ccr.resolver
|
||||
ccr.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish.
|
||||
<-ccr.serializer.Done()
|
||||
|
||||
// Spawn a goroutine to close the resolver (since it may block trying to
|
||||
// cleanup all allocated resources) and return early.
|
||||
go r.Close()
|
||||
}
|
||||
|
||||
// serializerScheduleLocked is a convenience method to schedule a function to be
|
||||
// run on the serializer while holding ccr.mu.
|
||||
func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
|
||||
ccr.mu.Lock()
|
||||
ccr.serializer.Schedule(f)
|
||||
ccr.mu.Unlock()
|
||||
}
|
||||
|
||||
// UpdateState is called by resolver implementations to report new state to gRPC
|
||||
// which includes addresses and service config.
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
errCh := make(chan error, 1)
|
||||
if s.Endpoints == nil {
|
||||
s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
|
||||
for _, a := range s.Addresses {
|
||||
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
|
||||
ep.Addresses[0].BalancerAttributes = nil
|
||||
s.Endpoints = append(s.Endpoints, ep)
|
||||
}
|
||||
}
|
||||
ok := ccr.serializer.Schedule(func(context.Context) {
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
ccr.curState = s
|
||||
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
||||
errCh <- balancer.ErrBadResolverState
|
||||
return
|
||||
}
|
||||
errCh <- nil
|
||||
})
|
||||
if !ok {
|
||||
// The only time when Schedule() fail to add the callback to the
|
||||
// serializer is when the serializer is closed, and this happens only
|
||||
// when the resolver wrapper is closed.
|
||||
return nil
|
||||
}
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
ccr.curState = s
|
||||
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
return nil
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
// ReportError is called by resolver implementations to report errors
|
||||
// encountered during name resolution to gRPC.
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.cc.updateResolverState(resolver.State{}, err)
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.cc.updateResolverState(resolver.State{}, err)
|
||||
})
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||
// NewAddress is called by the resolver implementation to send addresses to
|
||||
// gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||
if ccr.cc.dopts.disableServiceConfig {
|
||||
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
return
|
||||
}
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||
if ccr.ignoreServiceConfig {
|
||||
channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
return
|
||||
}
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseServiceConfig is called by resolver implementations to parse a JSON
|
||||
// representation of the service config.
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON)
|
||||
}
|
||||
|
||||
// addChannelzTraceEvent adds a channelz trace event containing the new
|
||||
// state received from resolver implementations.
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
var oldSC, newSC *ServiceConfig
|
||||
@ -172,5 +243,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||
updates = append(updates, "resolver returned new addresses")
|
||||
}
|
||||
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
}
|
||||
|
151 src/runtime/vendor/google.golang.org/grpc/rpc_util.go generated vendored
@ -25,7 +25,6 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -76,8 +75,8 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||
}
|
||||
return &gzipCompressor{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||||
New: func() any {
|
||||
w, err := gzip.NewWriterLevel(io.Discard, level)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||
z.Close()
|
||||
d.pool.Put(z)
|
||||
}()
|
||||
return ioutil.ReadAll(z)
|
||||
return io.ReadAll(z)
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Type() string {
|
||||
@ -160,6 +159,7 @@ type callInfo struct {
|
||||
contentSubtype string
|
||||
codec baseCodec
|
||||
maxRetryRPCBufferSize int
|
||||
onFinish []func(err error)
|
||||
}
|
||||
|
||||
func defaultCallInfo() *callInfo {
|
||||
@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption {
|
||||
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption {
|
||||
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption {
|
||||
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||||
// peer. The peer field will be populated *after* the RPC completes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption {
|
||||
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||||
// fast or not.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error {
|
||||
}
|
||||
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// OnFinish returns a CallOption that configures a callback to be called when
|
||||
// the call completes. The error passed to the callback is the status of the
|
||||
// RPC, and may be nil. The onFinish callback provided will only be called once
|
||||
// by gRPC. This is mainly used to be used by streaming interceptors, to be
|
||||
// notified when the RPC completes along with information about the status of
|
||||
// the RPC.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func OnFinish(onFinish func(err error)) CallOption {
|
||||
return OnFinishCallOption{
|
||||
OnFinish: onFinish,
|
||||
}
|
||||
}
|
||||
|
||||
// OnFinishCallOption is CallOption that indicates a callback to be called when
|
||||
// the call completes.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type OnFinishCallOption struct {
|
||||
OnFinish func(error)
|
||||
}
|
||||
|
||||
func (o OnFinishCallOption) before(c *callInfo) error {
|
||||
c.onFinish = append(c.onFinish, o.OnFinish)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can receive.
|
||||
// in bytes the client can receive. If this is not set, gRPC uses the default
|
||||
// 4MB.
|
||||
func MaxCallRecvMsgSize(bytes int) CallOption {
|
||||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
|
||||
}
|
||||
@ -305,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
|
||||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size in bytes the client can receive.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can send.
|
||||
// in bytes the client can send. If this is not set, gRPC uses the default
|
||||
// `math.MaxInt32`.
|
||||
func MaxCallSendMsgSize(bytes int) CallOption {
|
||||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
|
||||
}
|
||||
@ -328,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
|
||||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size in bytes the client can send.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -351,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
||||
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||||
// credentials to use for the call.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -369,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
// sending the request. If WithCompressor is also set, UseCompressor has
|
||||
// higher priority.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -379,7 +416,7 @@ func UseCompressor(name string) CallOption {
|
||||
|
||||
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -416,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
|
||||
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||||
// used for marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -444,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
// This function is provided for advanced users; prefer to use only
|
||||
// CallContentSubtype to select a registered codec instead.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -455,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
|
||||
// ForceCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -480,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption {
|
||||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -497,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
|
||||
// used for buffering this RPC's requests for retry purposes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -508,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
|
||||
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
|
||||
// memory to be used for caching this RPC for retry purposes.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
@ -540,6 +577,9 @@ type parser struct {
|
||||
// The header of a gRPC message. Find more detail at
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
header [5]byte
|
||||
|
||||
// recvBufferPool is the pool of shared receive buffers.
|
||||
recvBufferPool SharedBufferPool
|
||||
}
|
||||
|
||||
// recvMsg reads a complete gRPC message from the stream.
|
||||
@ -548,10 +588,11 @@ type parser struct {
|
||||
// format. The caller owns the returned msg memory.
|
||||
//
|
||||
// If there is an error, possible values are:
|
||||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * an error from the status package
|
||||
// - io.EOF, when no messages remain
|
||||
// - io.ErrUnexpectedEOF
|
||||
// - of type transport.ConnectionError
|
||||
// - an error from the status package
|
||||
//
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
@ -572,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||
if int(length) > maxReceiveMessageSize {
|
||||
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
||||
}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
msg = make([]byte, int(length))
|
||||
msg = p.recvBufferPool.Get(int(length))
|
||||
if _, err := p.r.Read(msg); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
@ -587,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||
// encode serializes msg and returns a buffer containing the message, or an
|
||||
// error if it is too large to be transmitted by grpc. If msg is nil, it
|
||||
// generates an empty message.
|
||||
func encode(c baseCodec, msg interface{}) ([]byte, error) {
|
||||
func encode(c baseCodec, msg any) ([]byte, error) {
|
||||
if msg == nil { // NOTE: typed nils will not be caught by this check
|
||||
return nil, nil
|
||||
}
|
||||
@ -654,14 +693,15 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
|
||||
return hdr, data
|
||||
}
|
||||
|
||||
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
|
||||
func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
|
||||
return &stats.OutPayload{
|
||||
Client: client,
|
||||
Payload: msg,
|
||||
Data: data,
|
||||
Length: len(data),
|
||||
WireLength: len(payload) + headerLen,
|
||||
SentTime: t,
|
||||
Client: client,
|
||||
Payload: msg,
|
||||
Data: data,
|
||||
Length: len(data),
|
||||
WireLength: len(payload) + headerLen,
|
||||
CompressedLength: len(payload),
|
||||
SentTime: t,
|
||||
}
|
||||
}
|
||||
|
||||
@ -682,17 +722,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
|
||||
}
|
||||
|
||||
type payloadInfo struct {
|
||||
wireLength int // The compressed length got from wire.
|
||||
compressedLength int // The compressed length got from wire.
|
||||
uncompressedBytes []byte
|
||||
}
|
||||
|
||||
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
||||
pf, buf, err := p.recvMsg(maxReceiveMessageSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.wireLength = len(d)
|
||||
payInfo.compressedLength = len(buf)
|
||||
}
|
||||
|
||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||
@ -704,13 +744,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
size = len(d)
|
||||
buf, err = dc.Do(bytes.NewReader(buf))
|
||||
size = len(buf)
|
||||
} else {
|
||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||||
buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||
}
|
||||
if size > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
@ -718,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Using compressor, decompress d, returning data and size.
|
||||
@ -745,23 +785,25 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return d, len(d), err
|
||||
}
|
||||
|
||||
// For the two compressor parameters, both should not be set, but if they are,
|
||||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||
d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||
buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
if err := c.Unmarshal(buf, m); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.uncompressedBytes = d
|
||||
payInfo.uncompressedBytes = buf
|
||||
} else {
|
||||
p.recvBufferPool.Put(&buf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -821,19 +863,22 @@ func ErrorDesc(err error) string {
|
||||
// Errorf returns nil if c is OK.
|
||||
//
|
||||
// Deprecated: use status.Errorf instead.
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
func Errorf(c codes.Code, format string, a ...any) error {
|
||||
return status.Errorf(c, format, a...)
|
||||
}
|
||||
|
||||
var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
|
||||
var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
|
||||
|
||||
// toRPCErr converts an error into an error from the status package.
|
||||
func toRPCErr(err error) error {
|
||||
switch err {
|
||||
case nil, io.EOF:
|
||||
return err
|
||||
case context.DeadlineExceeded:
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
return errContextDeadline
|
||||
case context.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
return errContextCanceled
|
||||
case io.ErrUnexpectedEOF:
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
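The rpc_util.go hunks above introduce the experimental OnFinish call option, which registers a callback invoked once with the RPC's final status. A minimal sketch of wrapping it in a helper (illustration only, not part of this commit; the helper name is invented):

package main

import (
	"log"

	"google.golang.org/grpc"
)

// finishLogger builds a CallOption from the new grpc.OnFinish API; the
// callback receives the RPC's final status error, which may be nil.
func finishLogger(rpcName string) grpc.CallOption {
	return grpc.OnFinish(func(err error) {
		log.Printf("%s finished, status error: %v", rpcName, err)
	})
}

func main() {
	// The option would be passed to any generated client method, e.g.
	// client.SomeRPC(ctx, req, finishLogger("SomeRPC")).
	_ = finishLogger("example")
}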
633 src/runtime/vendor/google.golang.org/grpc/server.go generated vendored (file diff suppressed because it is too large)
90 src/runtime/vendor/google.golang.org/grpc/service_config.go generated vendored
@ -23,8 +23,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -57,10 +55,9 @@ type lbConfig struct {
|
||||
type ServiceConfig struct {
|
||||
serviceconfig.Config
|
||||
|
||||
// LB is the load balancer the service providers recommends. The balancer
|
||||
// specified via grpc.WithBalancerName will override this. This is deprecated;
|
||||
// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
|
||||
// will be used.
|
||||
// LB is the load balancer the service providers recommends. This is
|
||||
// deprecated; lbConfigs is preferred. If lbConfig and LB are both present,
|
||||
// lbConfig will be used.
|
||||
LB *string
|
||||
|
||||
// lbConfig is the service config's load balancing configuration. If
|
||||
@ -107,8 +104,8 @@ type healthCheckConfig struct {
|
||||
|
||||
type jsonRetryPolicy struct {
|
||||
MaxAttempts int
|
||||
InitialBackoff string
|
||||
MaxBackoff string
|
||||
InitialBackoff internalserviceconfig.Duration
|
||||
MaxBackoff internalserviceconfig.Duration
|
||||
BackoffMultiplier float64
|
||||
RetryableStatusCodes []codes.Code
|
||||
}
|
||||
@ -130,50 +127,6 @@ type retryThrottlingPolicy struct {
|
||||
TokenRatio float64
|
||||
}
|
||||
|
||||
func parseDuration(s *string) (*time.Duration, error) {
|
||||
if s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !strings.HasSuffix(*s, "s") {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var d time.Duration
|
||||
if len(ss[0]) > 0 {
|
||||
i, err := strconv.ParseInt(ss[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
d = time.Duration(i) * time.Second
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
f, err := strconv.ParseInt(ss[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
f *= 10
|
||||
}
|
||||
d += time.Duration(f)
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
type jsonName struct {
|
||||
Service string
|
||||
Method string
|
||||
@ -202,7 +155,7 @@ func (j jsonName) generatePath() (string, error) {
|
||||
type jsonMC struct {
|
||||
Name *[]jsonName
|
||||
WaitForReady *bool
|
||||
Timeout *string
|
||||
Timeout *internalserviceconfig.Duration
|
||||
MaxRequestMessageBytes *int64
|
||||
MaxResponseMessageBytes *int64
|
||||
RetryPolicy *jsonRetryPolicy
|
||||
@ -227,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
@ -253,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
if m.Name == nil {
|
||||
continue
|
||||
}
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
WaitForReady: m.WaitForReady,
|
||||
Timeout: d,
|
||||
Timeout: (*time.Duration)(m.Timeout),
|
||||
}
|
||||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
@ -284,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
for i, n := range *m.Name {
|
||||
path, err := n.generatePath()
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
if _, ok := paths[path]; ok {
|
||||
err = errDuplicatedName
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
paths[path] = struct{}{}
|
||||
@ -313,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
||||
if jrp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
ib, err := parseDuration(&jrp.InitialBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mb, err := parseDuration(&jrp.MaxBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if jrp.MaxAttempts <= 1 ||
|
||||
*ib <= 0 ||
|
||||
*mb <= 0 ||
|
||||
jrp.InitialBackoff <= 0 ||
|
||||
jrp.MaxBackoff <= 0 ||
|
||||
jrp.BackoffMultiplier <= 0 ||
|
||||
len(jrp.RetryableStatusCodes) == 0 {
|
||||
logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
|
||||
@ -333,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
||||
|
||||
rp := &internalserviceconfig.RetryPolicy{
|
||||
MaxAttempts: jrp.MaxAttempts,
|
||||
InitialBackoff: *ib,
|
||||
MaxBackoff: *mb,
|
||||
InitialBackoff: time.Duration(jrp.InitialBackoff),
|
||||
MaxBackoff: time.Duration(jrp.MaxBackoff),
|
||||
BackoffMultiplier: jrp.BackoffMultiplier,
|
||||
RetryableStatusCodes: make(map[codes.Code]bool),
|
||||
}
|
||||
|
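For context on the parsing changes above (illustration only, not part of this commit): this is the JSON shape handled by parseServiceConfig, where duration strings such as "0.5s" are now decoded by the internal Duration type rather than the removed parseDuration helper. The service name is a hypothetical placeholder, and such a config would typically be supplied through grpc.WithDefaultServiceConfig.

// Hypothetical service config of the shape parsed above; "example.Greeter"
// is a placeholder service name, not part of the vendored code.
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "example.Greeter" }],
    "waitForReady": true,
    "timeout": "0.5s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`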
2 src/runtime/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go generated vendored
@ -19,7 +19,7 @@
|
||||
// Package serviceconfig defines types and methods for operating on gRPC
|
||||
// service configs.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
|
154 src/runtime/vendor/google.golang.org/grpc/shared_buffer_pool.go generated vendored (new file)
@ -0,0 +1,154 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import "sync"
|
||||
|
||||
// SharedBufferPool is a pool of buffers that can be shared, resulting in
|
||||
// decreased memory allocation. Currently, in gRPC-go, it is only utilized
|
||||
// for parsing incoming messages.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type SharedBufferPool interface {
|
||||
// Get returns a buffer with specified length from the pool.
|
||||
//
|
||||
// The returned byte slice may be not zero initialized.
|
||||
Get(length int) []byte
|
||||
|
||||
// Put returns a buffer to the pool.
|
||||
Put(*[]byte)
|
||||
}
|
||||
|
||||
// NewSharedBufferPool creates a simple SharedBufferPool with buckets
|
||||
// of different sizes to optimize memory usage. This prevents the pool from
|
||||
// wasting large amounts of memory, even when handling messages of varying sizes.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func NewSharedBufferPool() SharedBufferPool {
|
||||
return &simpleSharedBufferPool{
|
||||
pools: [poolArraySize]simpleSharedBufferChildPool{
|
||||
newBytesPool(level0PoolMaxSize),
|
||||
newBytesPool(level1PoolMaxSize),
|
||||
newBytesPool(level2PoolMaxSize),
|
||||
newBytesPool(level3PoolMaxSize),
|
||||
newBytesPool(level4PoolMaxSize),
|
||||
newBytesPool(0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
|
||||
type simpleSharedBufferPool struct {
|
||||
pools [poolArraySize]simpleSharedBufferChildPool
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) Get(size int) []byte {
|
||||
return p.pools[p.poolIdx(size)].Get(size)
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) Put(bs *[]byte) {
|
||||
p.pools[p.poolIdx(cap(*bs))].Put(bs)
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) poolIdx(size int) int {
|
||||
switch {
|
||||
case size <= level0PoolMaxSize:
|
||||
return level0PoolIdx
|
||||
case size <= level1PoolMaxSize:
|
||||
return level1PoolIdx
|
||||
case size <= level2PoolMaxSize:
|
||||
return level2PoolIdx
|
||||
case size <= level3PoolMaxSize:
|
||||
return level3PoolIdx
|
||||
case size <= level4PoolMaxSize:
|
||||
return level4PoolIdx
|
||||
default:
|
||||
return levelMaxPoolIdx
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
level0PoolMaxSize = 16 // 16 B
|
||||
level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
|
||||
level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
|
||||
level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
|
||||
level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
|
||||
)
|
||||
|
||||
const (
|
||||
level0PoolIdx = iota
|
||||
level1PoolIdx
|
||||
level2PoolIdx
|
||||
level3PoolIdx
|
||||
level4PoolIdx
|
||||
levelMaxPoolIdx
|
||||
poolArraySize
|
||||
)
|
||||
|
||||
type simpleSharedBufferChildPool interface {
|
||||
Get(size int) []byte
|
||||
Put(any)
|
||||
}
|
||||
|
||||
type bufferPool struct {
|
||||
sync.Pool
|
||||
|
||||
defaultSize int
|
||||
}
|
||||
|
||||
func (p *bufferPool) Get(size int) []byte {
|
||||
bs := p.Pool.Get().(*[]byte)
|
||||
|
||||
if cap(*bs) < size {
|
||||
p.Pool.Put(bs)
|
||||
|
||||
return make([]byte, size)
|
||||
}
|
||||
|
||||
return (*bs)[:size]
|
||||
}
|
||||
|
||||
func newBytesPool(size int) simpleSharedBufferChildPool {
|
||||
return &bufferPool{
|
||||
Pool: sync.Pool{
|
||||
New: func() any {
|
||||
bs := make([]byte, size)
|
||||
return &bs
|
||||
},
|
||||
},
|
||||
defaultSize: size,
|
||||
}
|
||||
}
|
||||
|
||||
// nopBufferPool is a buffer pool just makes new buffer without pooling.
|
||||
type nopBufferPool struct {
|
||||
}
|
||||
|
||||
func (nopBufferPool) Get(length int) []byte {
|
||||
return make([]byte, length)
|
||||
}
|
||||
|
||||
func (nopBufferPool) Put(*[]byte) {
|
||||
}
|
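The new shared_buffer_pool.go above defines the experimental SharedBufferPool interface plus a bucketed default implementation; the parser in rpc_util.go now draws its receive buffers from such a pool. A minimal sketch of the bucketing behaviour (illustration only, not part of this commit):

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	pool := grpc.NewSharedBufferPool()

	// A 3 KiB request falls into the 4 KB bucket: the returned slice has
	// the requested length, backed by a pooled 4 KiB array.
	buf := pool.Get(3 * 1024)
	fmt.Println(len(buf), cap(buf)) // 3072 4096

	// Returning the buffer makes it reusable by later Get calls.
	pool.Put(&buf)
}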
36 src/runtime/vendor/google.golang.org/grpc/stats/stats.go generated vendored
@ -59,18 +59,36 @@ func (s *Begin) IsClient() bool { return s.Client }
|
||||
|
||||
func (s *Begin) isRPCStats() {}
|
||||
|
||||
// PickerUpdated indicates that the LB policy provided a new picker while the
|
||||
// RPC was waiting for one.
|
||||
type PickerUpdated struct{}
|
||||
|
||||
// IsClient indicates if the stats information is from client side. Only Client
|
||||
// Side interfaces with a Picker, thus always returns true.
|
||||
func (*PickerUpdated) IsClient() bool { return true }
|
||||
|
||||
func (*PickerUpdated) isRPCStats() {}
|
||||
|
||||
// InPayload contains the information for an incoming payload.
|
||||
type InPayload struct {
|
||||
// Client is true if this InPayload is from client side.
|
||||
Client bool
|
||||
// Payload is the payload with original type.
|
||||
Payload interface{}
|
||||
Payload any
|
||||
// Data is the serialized message payload.
|
||||
Data []byte
|
||||
// Length is the length of uncompressed data.
|
||||
|
||||
// Length is the size of the uncompressed payload data. Does not include any
|
||||
// framing (gRPC or HTTP/2).
|
||||
Length int
|
||||
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||
// CompressedLength is the size of the compressed payload data. Does not
|
||||
// include any framing (gRPC or HTTP/2). Same as Length if compression not
|
||||
// enabled.
|
||||
CompressedLength int
|
||||
// WireLength is the size of the compressed payload data plus gRPC framing.
|
||||
// Does not include HTTP/2 framing.
|
||||
WireLength int
|
||||
|
||||
// RecvTime is the time when the payload is received.
|
||||
RecvTime time.Time
|
||||
}
|
||||
@ -126,12 +144,18 @@ type OutPayload struct {
|
||||
// Client is true if this OutPayload is from client side.
|
||||
Client bool
|
||||
// Payload is the payload with original type.
|
||||
Payload interface{}
|
||||
Payload any
|
||||
// Data is the serialized message payload.
|
||||
Data []byte
|
||||
// Length is the length of uncompressed data.
|
||||
// Length is the size of the uncompressed payload data. Does not include any
|
||||
// framing (gRPC or HTTP/2).
|
||||
Length int
|
||||
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||
// CompressedLength is the size of the compressed payload data. Does not
|
||||
// include any framing (gRPC or HTTP/2). Same as Length if compression not
|
||||
// enabled.
|
||||
CompressedLength int
|
||||
// WireLength is the size of the compressed payload data plus gRPC framing.
|
||||
// Does not include HTTP/2 framing.
|
||||
WireLength int
|
||||
// SentTime is the time when the payload is sent.
|
||||
SentTime time.Time
|
||||
|
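The stats.go hunk above clarifies the payload size fields and adds CompressedLength. As an illustration (not part of this commit; it assumes the stats.Handler interface from the same package, which is unchanged by this diff), a sketch of a handler that logs the three sizes:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// payloadSizeLogger logs the clarified payload size fields.
type payloadSizeLogger struct{}

var _ stats.Handler = payloadSizeLogger{}

func (payloadSizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (payloadSizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("recv: uncompressed=%d compressed=%d wire=%d", p.Length, p.CompressedLength, p.WireLength)
	case *stats.OutPayload:
		log.Printf("sent: uncompressed=%d compressed=%d wire=%d", p.Length, p.CompressedLength, p.WireLength)
	}
}

func (payloadSizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (payloadSizeLogger) HandleConn(context.Context, stats.ConnStats) {}

// The handler would be installed with grpc.WithStatsHandler(payloadSizeLogger{})
// on a client, or grpc.StatsHandler(payloadSizeLogger{}) on a server.
func main() { _ = payloadSizeLogger{} }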
67 src/runtime/vendor/google.golang.org/grpc/status/status.go generated vendored
@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status {
}

// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
func Newf(c codes.Code, format string, a ...any) *Status {
	return New(c, fmt.Sprintf(format, a...))
}

@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error {
}

// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
func Errorf(c codes.Code, format string, a ...any) error {
	return Error(c, fmt.Sprintf(format, a...))
}

@ -76,22 +76,52 @@ func FromProto(s *spb.Status) *Status {

// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
//   *Status`, the appropriate Status is returned.
// - If err was produced by this package or implements the method `GRPCStatus()
//   *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
//   satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
//   errors, the message returned contains the entire err.Error() text and not
//   just the wrapped status. In that case, ok is true.
//
// - If err is nil, a Status is returned with codes.OK and no message.
// - If err is nil, a Status is returned with codes.OK and no message, and ok
//   is true.
//
// - Otherwise, err is an error not compatible with this package. In this
//   case, a Status is returned with codes.Unknown and err's Error() message,
//   and ok is false.
// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
//   returns nil (which maps to Codes.OK), or if err wraps a type
//   satisfying this, a Status is returned with codes.Unknown and err's
//   Error() message, and ok is false.
//
// - Otherwise, err is an error not compatible with this package. In this
//   case, a Status is returned with codes.Unknown and err's Error() message,
//   and ok is false.
func FromError(err error) (s *Status, ok bool) {
	if err == nil {
		return nil, true
	}
	if se, ok := err.(interface {
		GRPCStatus() *Status
	}); ok {
		return se.GRPCStatus(), true
	type grpcstatus interface{ GRPCStatus() *Status }
	if gs, ok := err.(grpcstatus); ok {
		grpcStatus := gs.GRPCStatus()
		if grpcStatus == nil {
			// Error has status nil, which maps to codes.OK. There
			// is no sensible behavior for this, so we turn it into
			// an error with codes.Unknown and discard the existing
			// status.
			return New(codes.Unknown, err.Error()), false
		}
		return grpcStatus, true
	}
	var gs grpcstatus
	if errors.As(err, &gs) {
		grpcStatus := gs.GRPCStatus()
		if grpcStatus == nil {
			// Error wraps an error that has status nil, which maps
			// to codes.OK. There is no sensible behavior for this,
			// so we turn it into an error with codes.Unknown and
			// discard the existing status.
			return New(codes.Unknown, err.Error()), false
		}
		p := grpcStatus.Proto()
		p.Message = err.Error()
		return status.FromProto(p), true
	}
	return New(codes.Unknown, err.Error()), false
}

@ -103,19 +133,16 @@ func Convert(err error) *Status {
	return s
}

// Code returns the Code of the error if it is a Status error, codes.OK if err
// is nil, or codes.Unknown otherwise.
// Code returns the Code of the error if it is a Status error or if it wraps a
// Status error. If that is not the case, it returns codes.OK if err is nil, or
// codes.Unknown otherwise.
func Code(err error) codes.Code {
	// Don't use FromError to avoid allocation of OK status.
	if err == nil {
		return codes.OK
	}
	if se, ok := err.(interface {
		GRPCStatus() *Status
	}); ok {
		return se.GRPCStatus().Code()
	}
	return codes.Unknown

	return Convert(err).Code()
}

// FromContextError converts a context error or wrapped context error into a
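The net effect of the FromError and Code changes above is that wrapped status errors are now unwrapped via errors.As. A small usage sketch (not from the vendored code) of the v1.58 behavior:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "object missing")
	wrapped := fmt.Errorf("lookup failed: %w", base) // wraps the status error

	st, ok := status.FromError(wrapped)
	// ok == true; st.Code() == codes.NotFound, and st.Message() carries the
	// full wrapped text ("lookup failed: ..."), as documented above.
	fmt.Println(ok, st.Code(), st.Message())

	// Code also sees through the wrapping now, since it delegates to Convert.
	fmt.Println(status.Code(wrapped)) // NotFound
}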
411 src/runtime/vendor/google.golang.org/grpc/stream.go generated vendored
@ -31,6 +31,7 @@ import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/balancerload"
	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/internal/channelz"
@ -39,6 +40,7 @@ import (
	imetadata "google.golang.org/grpc/internal/metadata"
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/serviceconfig"
	istatus "google.golang.org/grpc/internal/status"
	"google.golang.org/grpc/internal/transport"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
@ -53,7 +55,7 @@ import (
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
type StreamHandler func(srv any, stream ServerStream) error

// StreamDesc represents a streaming RPC service's method specification. Used
// on the server when registering services and on the client when initiating
@ -78,9 +80,9 @@ type Stream interface {
	// Deprecated: See ClientStream and ServerStream documentation instead.
	Context() context.Context
	// Deprecated: See ClientStream and ServerStream documentation instead.
	SendMsg(m interface{}) error
	SendMsg(m any) error
	// Deprecated: See ClientStream and ServerStream documentation instead.
	RecvMsg(m interface{}) error
	RecvMsg(m any) error
}

// ClientStream defines the client-side behavior of a streaming RPC.
@ -89,7 +91,9 @@ type Stream interface {
// status package.
type ClientStream interface {
	// Header returns the header metadata received from the server if there
	// is any. It blocks if the metadata is not ready to read.
	// is any. It blocks if the metadata is not ready to read. If the metadata
	// is nil and the error is also nil, then the stream was terminated without
	// headers, and the status can be discovered by calling RecvMsg.
	Header() (metadata.MD, error)
	// Trailer returns the trailer metadata from the server, if there is any.
	// It must only be called after stream.CloseAndRecv has returned, or
@ -122,7 +126,10 @@ type ClientStream interface {
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines. It is also
	// not safe to call CloseSend concurrently with SendMsg.
	SendMsg(m interface{}) error
	//
	// It is not safe to modify the message after calling SendMsg. Tracing
	// libraries and stats handlers may use the message lazily.
	SendMsg(m any) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the stream completes successfully. On
	// any other error, the stream is aborted and the error contains the RPC
@ -131,7 +138,7 @@ type ClientStream interface {
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
	RecvMsg(m any) error
}

// NewStream creates a new Stream for the client side. This is typically
@ -140,13 +147,13 @@ type ClientStream interface {
// To ensure resources are not leaked due to the stream returned, one of the following
// actions must be performed:
//
// 1. Call Close on the ClientConn.
// 2. Cancel the context provided.
// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
//    client-streaming RPC, for instance, might use the helper function
//    CloseAndRecv (note that CloseSend does not Recv, therefore is not
//    guaranteed to release all resources).
// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
//  1. Call Close on the ClientConn.
//  2. Cancel the context provided.
//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
//     client-streaming RPC, for instance, might use the helper function
//     CloseAndRecv (note that CloseSend does not Recv, therefore is not
//     guaranteed to release all resources).
//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
//
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
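As an illustration of rules 2 and 3 in the list above, a hedged sketch (not part of the diff) that opens the standard health-checking Watch stream with NewStream, then cancels and drains it so no goroutine or context is leaked:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// watchOnce receives one health update and then shuts the stream down cleanly.
func watchOnce(cc *grpc.ClientConn) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // rule 2: cancel the context we provided

	desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}
	s, err := cc.NewStream(ctx, desc, "/grpc.health.v1.Health/Watch")
	if err != nil {
		return err
	}
	if err := s.SendMsg(&healthpb.HealthCheckRequest{}); err != nil {
		return err
	}
	if err := s.CloseSend(); err != nil {
		return err
	}

	var first healthpb.HealthCheckResponse
	if err := s.RecvMsg(&first); err != nil {
		return err
	}
	log.Printf("health status: %v", first.GetStatus())

	cancel() // done watching
	// Rule 3: keep calling RecvMsg until a non-nil error is returned so the
	// stream's resources are released.
	for {
		var resp healthpb.HealthCheckResponse
		if err := s.RecvMsg(&resp); err != nil {
			return nil // typically a Canceled status after cancel()
		}
	}
}

func main() {
	// Placeholder target address for the sketch.
	cc, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	if err := watchOnce(cc); err != nil {
		log.Fatal(err)
	}
}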
@ -167,10 +174,29 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
}
|
||||
|
||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||
if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||
// Start tracking the RPC for idleness purposes. This is where a stream is
|
||||
// created for both streaming and unary RPCs, and hence is a good place to
|
||||
// track active RPC count.
|
||||
if err := cc.idlenessMgr.OnCallBegin(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add a calloption, to decrement the active call count, that gets executed
|
||||
// when the RPC completes.
|
||||
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
|
||||
|
||||
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||
// validate md
|
||||
if err := imetadata.Validate(md); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
// validate added
|
||||
for _, kvs := range added {
|
||||
for i := 0; i < len(kvs); i += 2 {
|
||||
if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
cc.incrCallsStarted()
|
||||
@ -195,6 +221,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
|
||||
rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
|
||||
@ -301,12 +334,13 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
|
||||
if !cc.dopts.disableRetry {
|
||||
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
|
||||
}
|
||||
cs.binlog = binarylog.GetMethodLogger(method)
|
||||
|
||||
cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */)
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
return nil, err
|
||||
if ml := binarylog.GetMethodLogger(method); ml != nil {
|
||||
cs.binlogs = append(cs.binlogs, ml)
|
||||
}
|
||||
if cc.dopts.binaryLogger != nil {
|
||||
if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
|
||||
cs.binlogs = append(cs.binlogs, ml)
|
||||
}
|
||||
}
|
||||
|
||||
// Pick the transport to use and create a new stream on the transport.
|
||||
@ -328,7 +362,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cs.binlog != nil {
|
||||
if len(cs.binlogs) != 0 {
|
||||
md, _ := metadata.FromOutgoingContext(ctx)
|
||||
logEntry := &binarylog.ClientHeader{
|
||||
OnClientSide: true,
|
||||
@ -342,7 +376,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
|
||||
logEntry.Timeout = 0
|
||||
}
|
||||
}
|
||||
cs.binlog.Log(logEntry)
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
|
||||
if desc != unaryStreamDesc {
|
||||
@ -374,9 +410,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
|
||||
|
||||
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
|
||||
method := cs.callHdr.Method
|
||||
sh := cs.cc.dopts.copts.StatsHandler
|
||||
var beginTime time.Time
|
||||
if sh != nil {
|
||||
shs := cs.cc.dopts.copts.StatsHandlers
|
||||
for _, sh := range shs {
|
||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
|
||||
beginTime = time.Now()
|
||||
begin := &stats.Begin{
|
||||
@ -405,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
|
||||
ctx = trace.NewContext(ctx, trInfo.tr)
|
||||
}
|
||||
|
||||
if cs.cc.parsedTarget.Scheme == "xds" {
|
||||
if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
|
||||
// Add extra metadata (metadata that will be added by transport) to context
|
||||
// so the balancer can see them.
|
||||
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
|
||||
@ -414,12 +450,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
|
||||
}
|
||||
|
||||
return &csAttempt{
|
||||
ctx: ctx,
|
||||
beginTime: beginTime,
|
||||
cs: cs,
|
||||
dc: cs.cc.dopts.dc,
|
||||
statsHandler: sh,
|
||||
trInfo: trInfo,
|
||||
ctx: ctx,
|
||||
beginTime: beginTime,
|
||||
cs: cs,
|
||||
dc: cs.cc.dopts.dc,
|
||||
statsHandlers: shs,
|
||||
trInfo: trInfo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -427,7 +463,7 @@ func (a *csAttempt) getTransport() error {
|
||||
cs := a.cs
|
||||
|
||||
var err error
|
||||
a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||||
a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||||
if err != nil {
|
||||
if de, ok := err.(dropError); ok {
|
||||
err = de.error
|
||||
@ -444,6 +480,25 @@ func (a *csAttempt) getTransport() error {
|
||||
func (a *csAttempt) newStream() error {
|
||||
cs := a.cs
|
||||
cs.callHdr.PreviousAttempts = cs.numRetries
|
||||
|
||||
// Merge metadata stored in PickResult, if any, with existing call metadata.
|
||||
// It is safe to overwrite the csAttempt's context here, since all state
|
||||
// maintained in it are local to the attempt. When the attempt has to be
|
||||
// retried, a new instance of csAttempt will be created.
|
||||
if a.pickResult.Metadata != nil {
|
||||
// We currently do not have a function it the metadata package which
|
||||
// merges given metadata with existing metadata in a context. Existing
|
||||
// function `AppendToOutgoingContext()` takes a variadic argument of key
|
||||
// value pairs.
|
||||
//
|
||||
// TODO: Make it possible to retrieve key value pairs from metadata.MD
|
||||
// in a form passable to AppendToOutgoingContext(), or create a version
|
||||
// of AppendToOutgoingContext() that accepts a metadata.MD.
|
||||
md, _ := metadata.FromOutgoingContext(a.ctx)
|
||||
md = metadata.Join(md, a.pickResult.Metadata)
|
||||
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
|
||||
}
|
||||
|
||||
s, err := a.t.NewStream(a.ctx, cs.callHdr)
|
||||
if err != nil {
|
||||
nse, ok := err.(*transport.NewStreamError)
|
||||
@ -460,7 +515,7 @@ func (a *csAttempt) newStream() error {
|
||||
return toRPCErr(nse.Err)
|
||||
}
|
||||
a.s = s
|
||||
a.p = &parser{r: s}
|
||||
a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -486,7 +541,7 @@ type clientStream struct {
|
||||
|
||||
retryThrottler *retryThrottler // The throttler active when the RPC began.
|
||||
|
||||
binlog binarylog.MethodLogger // Binary logger, can be nil.
|
||||
binlogs []binarylog.MethodLogger
|
||||
// serverHeaderBinlogged is a boolean for whether server header has been
|
||||
// logged. Server header will be logged when the first time one of those
|
||||
// happens: stream.Header(), stream.Recv().
|
||||
@ -518,12 +573,12 @@ type clientStream struct {
|
||||
// csAttempt implements a single transport stream attempt within a
|
||||
// clientStream.
|
||||
type csAttempt struct {
|
||||
ctx context.Context
|
||||
cs *clientStream
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
done func(balancer.DoneInfo)
|
||||
ctx context.Context
|
||||
cs *clientStream
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
pickResult balancer.PickResult
|
||||
|
||||
finished bool
|
||||
dc Decompressor
|
||||
@ -536,8 +591,8 @@ type csAttempt struct {
|
||||
// and cleared when the finish method is called.
|
||||
trInfo *traceInfo
|
||||
|
||||
statsHandler stats.Handler
|
||||
beginTime time.Time
|
||||
statsHandlers []stats.Handler
|
||||
beginTime time.Time
|
||||
|
||||
// set for newStream errors that may be transparently retried
|
||||
allowTransparentRetry bool
|
||||
@ -704,6 +759,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
|
||||
// already be status errors.
|
||||
return toRPCErr(op(cs.attempt))
|
||||
}
|
||||
if len(cs.buffer) == 0 {
|
||||
// For the first op, which controls creation of the stream and
|
||||
// assigns cs.attempt, we need to create a new attempt inline
|
||||
// before executing the first op. On subsequent ops, the attempt
|
||||
// is created immediately before replaying the ops.
|
||||
var err error
|
||||
if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
|
||||
cs.mu.Unlock()
|
||||
cs.finish(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
a := cs.attempt
|
||||
cs.mu.Unlock()
|
||||
err := op(a)
|
||||
@ -734,12 +801,21 @@ func (cs *clientStream) Header() (metadata.MD, error) {
|
||||
m, err = a.s.Header()
|
||||
return toRPCErr(err)
|
||||
}, cs.commitAttemptLocked)
|
||||
|
||||
if m == nil && err == nil {
|
||||
// The stream ended with success. Finish the clientStream.
|
||||
err = io.EOF
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
return nil, err
|
||||
// Do not return the error. The user should get it by calling Recv().
|
||||
return nil, nil
|
||||
}
|
||||
if cs.binlog != nil && !cs.serverHeaderBinlogged {
|
||||
// Only log if binary log is on and header has not been logged.
|
||||
|
||||
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
|
||||
// Only log if binary log is on and header has not been logged, and
|
||||
// there is actually headers to log.
|
||||
logEntry := &binarylog.ServerHeader{
|
||||
OnClientSide: true,
|
||||
Header: m,
|
||||
@ -748,9 +824,12 @@ func (cs *clientStream) Header() (metadata.MD, error) {
|
||||
if peer, ok := peer.FromContext(cs.Context()); ok {
|
||||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
cs.binlog.Log(logEntry)
|
||||
cs.serverHeaderBinlogged = true
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
@ -791,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error
|
||||
cs.buffer = append(cs.buffer, op)
|
||||
}
|
||||
|
||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||
func (cs *clientStream) SendMsg(m any) (err error) {
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
// Call finish on the client stream for errors generated by this SendMsg
|
||||
@ -823,52 +902,42 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||
return a.sendMsg(m, hdr, payload, data)
|
||||
}
|
||||
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
|
||||
if cs.binlog != nil && err == nil {
|
||||
cs.binlog.Log(&binarylog.ClientMessage{
|
||||
if len(cs.binlogs) != 0 && err == nil {
|
||||
cm := &binarylog.ClientMessage{
|
||||
OnClientSide: true,
|
||||
Message: data,
|
||||
})
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, cm)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (cs *clientStream) RecvMsg(m interface{}) error {
|
||||
if cs.binlog != nil && !cs.serverHeaderBinlogged {
|
||||
func (cs *clientStream) RecvMsg(m any) error {
|
||||
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
|
||||
// Call Header() to binary log header if it's not already logged.
|
||||
cs.Header()
|
||||
}
|
||||
var recvInfo *payloadInfo
|
||||
if cs.binlog != nil {
|
||||
if len(cs.binlogs) != 0 {
|
||||
recvInfo = &payloadInfo{}
|
||||
}
|
||||
err := cs.withRetry(func(a *csAttempt) error {
|
||||
return a.recvMsg(m, recvInfo)
|
||||
}, cs.commitAttemptLocked)
|
||||
if cs.binlog != nil && err == nil {
|
||||
cs.binlog.Log(&binarylog.ServerMessage{
|
||||
if len(cs.binlogs) != 0 && err == nil {
|
||||
sm := &binarylog.ServerMessage{
|
||||
OnClientSide: true,
|
||||
Message: recvInfo.uncompressedBytes,
|
||||
})
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, sm)
|
||||
}
|
||||
}
|
||||
if err != nil || !cs.desc.ServerStreams {
|
||||
// err != nil or non-server-streaming indicates end of stream.
|
||||
cs.finish(err)
|
||||
|
||||
if cs.binlog != nil {
|
||||
// finish will not log Trailer. Log Trailer here.
|
||||
logEntry := &binarylog.ServerTrailer{
|
||||
OnClientSide: true,
|
||||
Trailer: cs.Trailer(),
|
||||
Err: err,
|
||||
}
|
||||
if logEntry.Err == io.EOF {
|
||||
logEntry.Err = nil
|
||||
}
|
||||
if peer, ok := peer.FromContext(cs.Context()); ok {
|
||||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
cs.binlog.Log(logEntry)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -888,10 +957,13 @@ func (cs *clientStream) CloseSend() error {
|
||||
return nil
|
||||
}
|
||||
cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
|
||||
if cs.binlog != nil {
|
||||
cs.binlog.Log(&binarylog.ClientHalfClose{
|
||||
if len(cs.binlogs) != 0 {
|
||||
chc := &binarylog.ClientHalfClose{
|
||||
OnClientSide: true,
|
||||
})
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, chc)
|
||||
}
|
||||
}
|
||||
// We never returned an error here for reasons.
|
||||
return nil
|
||||
@ -908,6 +980,9 @@ func (cs *clientStream) finish(err error) {
|
||||
return
|
||||
}
|
||||
cs.finished = true
|
||||
for _, onFinish := range cs.callInfo.onFinish {
|
||||
onFinish(err)
|
||||
}
|
||||
cs.commitAttemptLocked()
|
||||
if cs.attempt != nil {
|
||||
cs.attempt.finish(err)
|
||||
@ -918,16 +993,31 @@ func (cs *clientStream) finish(err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cs.mu.Unlock()
|
||||
// For binary logging. only log cancel in finish (could be caused by RPC ctx
|
||||
// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
|
||||
//
|
||||
// Only one of cancel or trailer needs to be logged. In the cases where
|
||||
// users don't call RecvMsg, users must have already canceled the RPC.
|
||||
if cs.binlog != nil && status.Code(err) == codes.Canceled {
|
||||
cs.binlog.Log(&binarylog.Cancel{
|
||||
OnClientSide: true,
|
||||
})
|
||||
// Only one of cancel or trailer needs to be logged.
|
||||
if len(cs.binlogs) != 0 {
|
||||
switch err {
|
||||
case errContextCanceled, errContextDeadline, ErrClientConnClosing:
|
||||
c := &binarylog.Cancel{
|
||||
OnClientSide: true,
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, c)
|
||||
}
|
||||
default:
|
||||
logEntry := &binarylog.ServerTrailer{
|
||||
OnClientSide: true,
|
||||
Trailer: cs.Trailer(),
|
||||
Err: err,
|
||||
}
|
||||
if peer, ok := peer.FromContext(cs.Context()); ok {
|
||||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
cs.retryThrottler.successfulRPC()
|
||||
@ -942,7 +1032,7 @@ func (cs *clientStream) finish(err error) {
|
||||
cs.cancel()
|
||||
}
|
||||
|
||||
func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
|
||||
func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
|
||||
cs := a.cs
|
||||
if a.trInfo != nil {
|
||||
a.mu.Lock()
|
||||
@ -960,8 +1050,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
if a.statsHandler != nil {
|
||||
a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
|
||||
for _, sh := range a.statsHandlers {
|
||||
sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
a.t.IncrMsgSent()
|
||||
@ -969,9 +1059,9 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
|
||||
func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
|
||||
cs := a.cs
|
||||
if a.statsHandler != nil && payInfo == nil {
|
||||
if len(a.statsHandlers) != 0 && payInfo == nil {
|
||||
payInfo = &payloadInfo{}
|
||||
}
|
||||
|
||||
@ -999,6 +1089,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
|
||||
}
|
||||
return io.EOF // indicates successful end of stream.
|
||||
}
|
||||
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if a.trInfo != nil {
|
||||
@ -1008,15 +1099,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
|
||||
}
|
||||
a.mu.Unlock()
|
||||
}
|
||||
if a.statsHandler != nil {
|
||||
a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
|
||||
for _, sh := range a.statsHandlers {
|
||||
sh.HandleRPC(a.ctx, &stats.InPayload{
|
||||
Client: true,
|
||||
RecvTime: time.Now(),
|
||||
Payload: m,
|
||||
// TODO truncate large payload.
|
||||
Data: payInfo.uncompressedBytes,
|
||||
WireLength: payInfo.wireLength + headerLen,
|
||||
Length: len(payInfo.uncompressedBytes),
|
||||
Data: payInfo.uncompressedBytes,
|
||||
WireLength: payInfo.compressedLength + headerLen,
|
||||
CompressedLength: payInfo.compressedLength,
|
||||
Length: len(payInfo.uncompressedBytes),
|
||||
})
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
@ -1055,12 +1147,12 @@ func (a *csAttempt) finish(err error) {
|
||||
tr = a.s.Trailer()
|
||||
}
|
||||
|
||||
if a.done != nil {
|
||||
if a.pickResult.Done != nil {
|
||||
br := false
|
||||
if a.s != nil {
|
||||
br = a.s.BytesReceived()
|
||||
}
|
||||
a.done(balancer.DoneInfo{
|
||||
a.pickResult.Done(balancer.DoneInfo{
|
||||
Err: err,
|
||||
Trailer: tr,
|
||||
BytesSent: a.s != nil,
|
||||
@ -1068,7 +1160,7 @@ func (a *csAttempt) finish(err error) {
|
||||
ServerLoad: balancerload.Parse(tr),
|
||||
})
|
||||
}
|
||||
if a.statsHandler != nil {
|
||||
for _, sh := range a.statsHandlers {
|
||||
end := &stats.End{
|
||||
Client: true,
|
||||
BeginTime: a.beginTime,
|
||||
@ -1076,7 +1168,7 @@ func (a *csAttempt) finish(err error) {
|
||||
Trailer: tr,
|
||||
Error: err,
|
||||
}
|
||||
a.statsHandler.HandleRPC(a.ctx, end)
|
||||
sh.HandleRPC(a.ctx, end)
|
||||
}
|
||||
if a.trInfo != nil && a.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
@ -1182,17 +1274,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
|
||||
return nil, err
|
||||
}
|
||||
as.s = s
|
||||
as.p = &parser{r: s}
|
||||
as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
|
||||
ac.incrCallsStarted()
|
||||
if desc != unaryStreamDesc {
|
||||
// Listen on cc and stream contexts to cleanup when the user closes the
|
||||
// ClientConn or cancels the stream context. In all other cases, an error
|
||||
// should already be injected into the recv buffer by the transport, which
|
||||
// the client will eventually receive, and then we will cancel the stream's
|
||||
// context in clientStream.finish.
|
||||
// Listen on stream context to cleanup when the stream context is
|
||||
// canceled. Also listen for the addrConn's context in case the
|
||||
// addrConn is closed or reconnects to a different address. In all
|
||||
// other cases, an error should already be injected into the recv
|
||||
// buffer by the transport, which the client will eventually receive,
|
||||
// and then we will cancel the stream's context in
|
||||
// addrConnStream.finish.
|
||||
go func() {
|
||||
ac.mu.Lock()
|
||||
acCtx := ac.ctx
|
||||
ac.mu.Unlock()
|
||||
select {
|
||||
case <-ac.ctx.Done():
|
||||
case <-acCtx.Done():
|
||||
as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
|
||||
case <-ctx.Done():
|
||||
as.finish(toRPCErr(ctx.Err()))
|
||||
@ -1255,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context {
|
||||
return as.s.Context()
|
||||
}
|
||||
|
||||
func (as *addrConnStream) SendMsg(m interface{}) (err error) {
|
||||
func (as *addrConnStream) SendMsg(m any) (err error) {
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
// Call finish on the client stream for errors generated by this SendMsg
|
||||
@ -1300,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
|
||||
func (as *addrConnStream) RecvMsg(m any) (err error) {
|
||||
defer func() {
|
||||
if err != nil || !as.desc.ServerStreams {
|
||||
// err != nil or non-server-streaming indicates end of stream.
|
||||
@ -1416,7 +1513,10 @@ type ServerStream interface {
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines.
|
||||
SendMsg(m interface{}) error
|
||||
//
|
||||
// It is not safe to modify the message after calling SendMsg. Tracing
|
||||
// libraries and stats handlers may use the message lazily.
|
||||
SendMsg(m any) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the client has performed a CloseSend. On
|
||||
// any non-EOF error, the stream is aborted and the error contains the
|
||||
@ -1425,7 +1525,7 @@ type ServerStream interface {
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
RecvMsg(m any) error
|
||||
}
|
||||
|
||||
// serverStream implements a server side Stream.
|
||||
@ -1441,13 +1541,15 @@ type serverStream struct {
|
||||
comp encoding.Compressor
|
||||
decomp encoding.Compressor
|
||||
|
||||
sendCompressorName string
|
||||
|
||||
maxReceiveMessageSize int
|
||||
maxSendMessageSize int
|
||||
trInfo *traceInfo
|
||||
|
||||
statsHandler stats.Handler
|
||||
statsHandler []stats.Handler
|
||||
|
||||
binlog binarylog.MethodLogger
|
||||
binlogs []binarylog.MethodLogger
|
||||
// serverHeaderBinlogged indicates whether server header has been logged. It
|
||||
// will happen when one of the following two happens: stream.SendHeader(),
|
||||
// stream.Send().
|
||||
@ -1481,12 +1583,15 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
|
||||
}
|
||||
|
||||
err = ss.t.WriteHeader(ss.s, md)
|
||||
if ss.binlog != nil && !ss.serverHeaderBinlogged {
|
||||
if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
|
||||
h, _ := ss.s.Header()
|
||||
ss.binlog.Log(&binarylog.ServerHeader{
|
||||
sh := &binarylog.ServerHeader{
|
||||
Header: h,
|
||||
})
|
||||
}
|
||||
ss.serverHeaderBinlogged = true
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(ss.ctx, sh)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -1501,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
|
||||
ss.s.SetTrailer(md)
|
||||
}
|
||||
|
||||
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
func (ss *serverStream) SendMsg(m any) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
@ -1509,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
} else {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
@ -1530,6 +1635,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
// Server handler could have set new compressor by calling SetSendCompressor.
|
||||
// In case it is set, we need to use it for compressing outbound message.
|
||||
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
|
||||
ss.comp = encoding.GetCompressor(sendCompressorsName)
|
||||
ss.sendCompressorName = sendCompressorsName
|
||||
}
|
||||
|
||||
// load hdr, payload, data
|
||||
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
|
||||
if err != nil {
|
||||
@ -1543,25 +1655,33 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if ss.binlog != nil {
|
||||
if len(ss.binlogs) != 0 {
|
||||
if !ss.serverHeaderBinlogged {
|
||||
h, _ := ss.s.Header()
|
||||
ss.binlog.Log(&binarylog.ServerHeader{
|
||||
sh := &binarylog.ServerHeader{
|
||||
Header: h,
|
||||
})
|
||||
}
|
||||
ss.serverHeaderBinlogged = true
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(ss.ctx, sh)
|
||||
}
|
||||
}
|
||||
ss.binlog.Log(&binarylog.ServerMessage{
|
||||
sm := &binarylog.ServerMessage{
|
||||
Message: data,
|
||||
})
|
||||
}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(ss.ctx, sm)
|
||||
}
|
||||
}
|
||||
if ss.statsHandler != nil {
|
||||
ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
|
||||
if len(ss.statsHandler) != 0 {
|
||||
for _, sh := range ss.statsHandler {
|
||||
sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
func (ss *serverStream) RecvMsg(m any) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
@ -1569,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
} else if err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
@ -1590,13 +1710,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
}
|
||||
}()
|
||||
var payInfo *payloadInfo
|
||||
if ss.statsHandler != nil || ss.binlog != nil {
|
||||
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
|
||||
payInfo = &payloadInfo{}
|
||||
}
|
||||
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
|
||||
if err == io.EOF {
|
||||
if ss.binlog != nil {
|
||||
ss.binlog.Log(&binarylog.ClientHalfClose{})
|
||||
if len(ss.binlogs) != 0 {
|
||||
chc := &binarylog.ClientHalfClose{}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(ss.ctx, chc)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -1605,20 +1728,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if ss.statsHandler != nil {
|
||||
ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
|
||||
RecvTime: time.Now(),
|
||||
Payload: m,
|
||||
// TODO truncate large payload.
|
||||
Data: payInfo.uncompressedBytes,
|
||||
WireLength: payInfo.wireLength + headerLen,
|
||||
Length: len(payInfo.uncompressedBytes),
|
||||
})
|
||||
if len(ss.statsHandler) != 0 {
|
||||
for _, sh := range ss.statsHandler {
|
||||
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
|
||||
RecvTime: time.Now(),
|
||||
Payload: m,
|
||||
// TODO truncate large payload.
|
||||
Data: payInfo.uncompressedBytes,
|
||||
Length: len(payInfo.uncompressedBytes),
|
||||
WireLength: payInfo.compressedLength + headerLen,
|
||||
CompressedLength: payInfo.compressedLength,
|
||||
})
|
||||
}
|
||||
}
|
||||
if ss.binlog != nil {
|
||||
ss.binlog.Log(&binarylog.ClientMessage{
|
||||
if len(ss.binlogs) != 0 {
|
||||
cm := &binarylog.ClientMessage{
|
||||
Message: payInfo.uncompressedBytes,
|
||||
})
|
||||
}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(ss.ctx, cm)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -1632,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
|
||||
// prepareMsg returns the hdr, payload and data
|
||||
// using the compressors passed or using the
|
||||
// passed preparedmsg
|
||||
func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
|
||||
func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
|
||||
if preparedMsg, ok := m.(*PreparedMsg); ok {
|
||||
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
|
||||
}
|
||||
|
2 src/runtime/vendor/google.golang.org/grpc/tap/tap.go generated vendored
@ -19,7 +19,7 @@
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
6 src/runtime/vendor/google.golang.org/grpc/trace.go generated vendored
@ -97,8 +97,8 @@ func truncate(x string, l int) string {

// payload represents an RPC request or response payload.
type payload struct {
	sent bool // whether this is an outgoing payload
	msg interface{} // e.g. a proto.Message
	sent bool // whether this is an outgoing payload
	msg any // e.g. a proto.Message
	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
}

@ -111,7 +111,7 @@ func (p payload) String() string {

type fmtStringer struct {
	format string
	a []interface{}
	a []any
}

func (f *fmtStringer) String() string {
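The interface{} to any rewrites in this hunk, and across the rest of this upgrade (now enforced by a vet.sh check), are purely cosmetic: since Go 1.18 the language predeclares any as an alias, so both spellings denote the same type and the vendored code compiles identically. The alias, as declared in the builtin package:

type any = interface{}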
2 src/runtime/vendor/google.golang.org/grpc/version.go generated vendored
@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.47.0"
const Version = "1.58.3"
46 src/runtime/vendor/google.golang.org/grpc/vet.sh generated vendored
@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then
|
||||
github.com/client9/misspell/cmd/misspell
|
||||
popd
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
if [[ "${TRAVIS}" = "true" ]]; then
|
||||
PROTOBUF_VERSION=3.14.0
|
||||
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
|
||||
pushd /home/travis
|
||||
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
|
||||
unzip ${PROTOC_FILENAME}
|
||||
bin/protoc --version
|
||||
popd
|
||||
elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
|
||||
PROTOBUF_VERSION=3.14.0
|
||||
if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
|
||||
PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
|
||||
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
|
||||
pushd /home/runner/go
|
||||
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
|
||||
@ -66,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then
|
||||
die "Unknown argument(s): $*"
|
||||
fi
|
||||
|
||||
# - Check that generated proto files are up to date.
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
make proto && git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
fi
|
||||
|
||||
if [[ -n "${VET_ONLY_PROTO}" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# - Ensure all source files contain a copyright message.
|
||||
not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
|
||||
# (Done in two parts because Darwin "git grep" has broken support for compound
|
||||
# exclusion matches.)
|
||||
(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
|
||||
|
||||
# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
|
||||
not grep 'func Test[^(]' *_test.go
|
||||
@ -80,8 +84,11 @@ not git grep -l 'x/net/context' -- "*.go"
|
||||
# thread safety.
|
||||
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
|
||||
|
||||
# - Do not use "interface{}"; use "any" instead.
|
||||
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
|
||||
|
||||
# - Do not call grpclog directly. Use grpclog.Component instead.
|
||||
git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
|
||||
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
|
||||
|
||||
# - Ensure all ptypes proto packages are renamed when importing.
|
||||
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
|
||||
@ -91,13 +98,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.
|
||||
|
||||
misspell -error .
|
||||
|
||||
# - Check that generated proto files are up to date.
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
PATH="/home/travis/bin:${PATH}" make proto && \
|
||||
git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
fi
|
||||
|
||||
# - gofmt, goimports, golint (with exceptions for generated code), go vet,
|
||||
# go mod tidy.
|
||||
# Perform these checks on each module inside gRPC.
|
||||
@ -109,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
|
||||
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
|
||||
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
|
||||
|
||||
go mod tidy
|
||||
go mod tidy -compat=1.19
|
||||
git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
popd
|
||||
@ -119,8 +119,9 @@ done
|
||||
#
|
||||
# TODO(dfawley): don't use deprecated functions in examples or first-party
|
||||
# plugins.
|
||||
# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
|
||||
SC_OUT="$(mktemp)"
|
||||
staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
|
||||
staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
|
||||
# Error if anything other than deprecation warnings are printed.
|
||||
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
|
||||
# Only ignore the following deprecated types/fields/functions.
|
||||
@ -147,7 +148,6 @@ grpc.NewGZIPDecompressor
|
||||
grpc.RPCCompressor
|
||||
grpc.RPCDecompressor
|
||||
grpc.ServiceConfig
|
||||
grpc.WithBalancerName
|
||||
grpc.WithCompressor
|
||||
grpc.WithDecompressor
|
||||
grpc.WithDialer
|
||||
@ -171,8 +171,6 @@ proto.RegisteredExtension is deprecated
|
||||
proto.RegisteredExtensions is deprecated
|
||||
proto.RegisterMapType is deprecated
|
||||
proto.Unmarshaler is deprecated
|
||||
resolver.Backend
|
||||
resolver.GRPCLB
|
||||
Target is deprecated: Use the Target field in the BuildOptions instead.
|
||||
xxx_messageInfo_
|
||||
' "${SC_OUT}"
|
||||
|
6
src/runtime/vendor/modules.txt
vendored
6
src/runtime/vendor/modules.txt
vendored
@ -630,8 +630,8 @@ google.golang.org/genproto/protobuf/field_mask
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.58.3 => google.golang.org/grpc v1.47.0
## explicit; go 1.14
# google.golang.org/grpc v1.58.3
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@ -662,6 +662,7 @@ google.golang.org/grpc/internal/grpclog
google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/idle
google.golang.org/grpc/internal/metadata
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
@ -752,5 +753,4 @@ tags.cncf.io/container-device-interface/specs-go
# github.com/stretchr/testify => github.com/stretchr/testify v1.8.0
# github.com/uber-go/atomic => go.uber.org/atomic v1.5.1
# golang.org/x/text => golang.org/x/text v0.7.0
# google.golang.org/grpc => google.golang.org/grpc v1.47.0
# gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1