mirror of https://github.com/distribution/distribution.git, synced 2025-09-25 14:18:10 +00:00
Upgrade go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
Signed-off-by: krynju <krystian.gulinski@juliahub.com>
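For context: otelhttp instruments HTTP clients and servers with OpenTelemetry traces and metrics. A minimal client-side sketch, not part of this diff (the request URL is illustrative):

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the default transport so outgoing requests are traced.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://registry.example.com/v2/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}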
vendor/golang.org/x/net/http2/transport.go (generated, vendored): 141 changes
@@ -25,7 +25,6 @@ import (
 	"net/http"
 	"net/http/httptrace"
 	"net/textproto"
-	"os"
 	"sort"
 	"strconv"
 	"strings"
@@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
 }
 
 func (t *Transport) maxHeaderListSize() uint32 {
-	if t.MaxHeaderListSize == 0 {
+	n := int64(t.MaxHeaderListSize)
+	if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+		n = t.t1.MaxResponseHeaderBytes
+		if n > 0 {
+			n = adjustHTTP1MaxHeaderSize(n)
+		}
+	}
+	if n <= 0 {
 		return 10 << 20
 	}
-	if t.MaxHeaderListSize == 0xffffffff {
+	if n >= 0xffffffff {
 		return 0
 	}
-	return t.MaxHeaderListSize
-}
-
-func (t *Transport) maxFrameReadSize() uint32 {
-	if t.MaxReadFrameSize == 0 {
-		return 0 // use the default provided by the peer
-	}
-	if t.MaxReadFrameSize < minMaxFrameSize {
-		return minMaxFrameSize
-	}
-	if t.MaxReadFrameSize > maxFrameSize {
-		return maxFrameSize
-	}
-	return t.MaxReadFrameSize
+	return uint32(n)
 }
 
 func (t *Transport) disableCompression() bool {
 	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
 }
 
-func (t *Transport) pingTimeout() time.Duration {
-	if t.PingTimeout == 0 {
-		return 15 * time.Second
-	}
-	return t.PingTimeout
-
-}
-
 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
 // It returns an error if t1 has already been HTTP/2-enabled.
 //
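Note on the hunk above: maxHeaderListSize is no longer a plain getter. When the underlying HTTP/1 transport sets MaxResponseHeaderBytes, that value (adjusted for HTTP/1 header overhead by the vendored code) becomes the advertised HTTP/2 header-list limit. A hedged sketch of how the two transports are paired; the limit value is illustrative:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Cap response headers at roughly 1 MiB on the HTTP/1 transport; per the
	// diff above, the HTTP/2 side now derives SETTINGS_MAX_HEADER_LIST_SIZE
	// from this value rather than only from http2.Transport.MaxHeaderListSize.
	t1 := &http.Transport{MaxResponseHeaderBytes: 1 << 20}
	if _, err := http2.ConfigureTransports(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	_ = client
}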
@@ -370,11 +355,14 @@ type ClientConn struct {
 	lastActive      time.Time
 	lastIdle        time.Time // time last idle
 	// Settings from peer: (also guarded by wmu)
-	maxFrameSize           uint32
-	maxConcurrentStreams   uint32
-	peerMaxHeaderListSize  uint64
-	peerMaxHeaderTableSize uint32
-	initialWindowSize      uint32
+	maxFrameSize                uint32
+	maxConcurrentStreams        uint32
+	peerMaxHeaderListSize       uint64
+	peerMaxHeaderTableSize      uint32
+	initialWindowSize           uint32
+	initialStreamRecvWindowSize int32
+	readIdleTimeout             time.Duration
+	pingTimeout                 time.Duration
 
 	// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
 	// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
 }
 
 type stickyErrWriter struct {
+	group   synctestGroupInterface
 	conn    net.Conn
 	timeout time.Duration
 	err     *error
@@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
 	if *sew.err != nil {
 		return 0, *sew.err
 	}
-	for {
-		if sew.timeout != 0 {
-			sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
-		}
-		nn, err := sew.conn.Write(p[n:])
-		n += nn
-		if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
-			// Keep extending the deadline so long as we're making progress.
-			continue
-		}
-		if sew.timeout != 0 {
-			sew.conn.SetWriteDeadline(time.Time{})
-		}
-		*sew.err = err
-		return n, err
-	}
+	n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+	*sew.err = err
+	return n, err
 }
 
 // noCachedConnError is the concrete type of ErrNoCachedConn, which
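The hunk above replaces the inline write loop with a writeWithByteTimeout helper; the behavior (re-arm the write deadline while bytes keep flowing) stays the same. A standalone sketch of that pattern, using a hypothetical function name rather than the library's internal helper:

package sketch

import (
	"errors"
	"net"
	"os"
	"time"
)

// writeWithProgressDeadline is a hypothetical restatement of the pattern in the
// removed loop: set a deadline per write attempt and keep extending it as long
// as each attempt makes progress; give up only when a deadline expires with no
// bytes written, or on any other error.
func writeWithProgressDeadline(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
	if timeout <= 0 {
		return conn.Write(p)
	}
	defer conn.SetWriteDeadline(time.Time{}) // clear the deadline on exit
	for n < len(p) {
		conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, werr := conn.Write(p[n:])
		n += nn
		if werr == nil {
			continue
		}
		if errors.Is(werr, os.ErrDeadlineExceeded) && nn > 0 {
			// Progress was made before the deadline fired; keep going.
			continue
		}
		return n, werr
	}
	return n, nil
}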
@@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration {
 	return t.t1.ExpectContinueTimeout
 }
 
-func (t *Transport) maxDecoderHeaderTableSize() uint32 {
-	if v := t.MaxDecoderHeaderTableSize; v > 0 {
-		return v
-	}
-	return initialHeaderTableSize
-}
-
-func (t *Transport) maxEncoderHeaderTableSize() uint32 {
-	if v := t.MaxEncoderHeaderTableSize; v > 0 {
-		return v
-	}
-	return initialHeaderTableSize
-}
-
 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
 	return t.newClientConn(c, t.disableKeepAlives())
 }
 
 func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+	conf := configFromTransport(t)
 	cc := &ClientConn{
-		t:                     t,
-		tconn:                 c,
-		readerDone:            make(chan struct{}),
-		nextStreamID:          1,
-		maxFrameSize:          16 << 10, // spec default
-		initialWindowSize:     65535, // spec default
-		maxConcurrentStreams:  initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
-		peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
-		streams:               make(map[uint32]*clientStream),
-		singleUse:             singleUse,
-		wantSettingsAck:       true,
-		pings:                 make(map[[8]byte]chan struct{}),
-		reqHeaderMu:           make(chan struct{}, 1),
+		t:                           t,
+		tconn:                       c,
+		readerDone:                  make(chan struct{}),
+		nextStreamID:                1,
+		maxFrameSize:                16 << 10, // spec default
+		initialWindowSize:           65535, // spec default
+		initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+		maxConcurrentStreams:        initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+		peerMaxHeaderListSize:       0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+		streams:                     make(map[uint32]*clientStream),
+		singleUse:                   singleUse,
+		wantSettingsAck:             true,
+		readIdleTimeout:             conf.SendPingTimeout,
+		pingTimeout:                 conf.PingTimeout,
+		pings:                       make(map[[8]byte]chan struct{}),
+		reqHeaderMu:                 make(chan struct{}, 1),
 	}
+	var group synctestGroupInterface
 	if t.transportTestHooks != nil {
 		t.markNewGoroutine()
 		t.transportTestHooks.newclientconn(cc)
 		c = cc.tconn
+		group = t.group
 	}
 	if VerboseLogs {
 		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
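In the hunk above, per-connection settings now come from conf := configFromTransport(t), an internal consolidation of the transport's configuration. The exported http2.Transport fields remain the public knobs and presumably feed the corresponding conf values. A sketch of setting the ones this hunk reads; the values are illustrative:

package main

import (
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		MaxReadFrameSize:          1 << 20,          // presumably behind conf.MaxReadFrameSize
		MaxDecoderHeaderTableSize: 64 << 10,         // presumably behind conf.MaxDecoderHeaderTableSize
		MaxEncoderHeaderTableSize: 64 << 10,         // presumably behind conf.MaxEncoderHeaderTableSize
		WriteByteTimeout:          15 * time.Second, // presumably behind conf.WriteByteTimeout
	}
	_ = t
}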
@@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 	// TODO: adjust this writer size to account for frame size +
 	// MTU + crypto/tls record padding.
 	cc.bw = bufio.NewWriter(stickyErrWriter{
+		group:   group,
 		conn:    c,
-		timeout: t.WriteByteTimeout,
+		timeout: conf.WriteByteTimeout,
 		err:     &cc.werr,
 	})
 	cc.br = bufio.NewReader(c)
 	cc.fr = NewFramer(cc.bw, cc.br)
-	if t.maxFrameReadSize() != 0 {
-		cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
-	}
+	cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
 	if t.CountError != nil {
 		cc.fr.countError = t.CountError
 	}
-	maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+	maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
 	cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
 	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
 
 	cc.henc = hpack.NewEncoder(&cc.hbuf)
-	cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+	cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
 	cc.peerMaxHeaderTableSize = initialHeaderTableSize
 
 	if cs, ok := c.(connectionStater); ok {
@@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 
 	initialSettings := []Setting{
 		{ID: SettingEnablePush, Val: 0},
-		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
-	}
-	if max := t.maxFrameReadSize(); max != 0 {
-		initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+		{ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
 	}
+	initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
 	if max := t.maxHeaderListSize(); max != 0 {
 		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
 	}
@@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 
 	cc.bw.Write(clientPreface)
 	cc.fr.WriteSettings(initialSettings...)
-	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
-	cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
+	cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+	cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
 	cc.bw.Flush()
 	if cc.werr != nil {
 		cc.Close()
@@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 }
 
 func (cc *ClientConn) healthCheck() {
-	pingTimeout := cc.t.pingTimeout()
+	pingTimeout := cc.pingTimeout
 	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
 	// trigger the healthCheck again if there is no frame received.
 	ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
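readIdleTimeout and pingTimeout are now per-connection fields populated from conf in newClientConn (see the earlier hunk). The public knobs for the same mechanism are http2.Transport.ReadIdleTimeout and PingTimeout; a usage sketch with illustrative values:

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	t2, err := http2.ConfigureTransports(t1)
	if err != nil {
		log.Fatal(err)
	}
	// If no frame is received for ReadIdleTimeout, the transport sends a ping;
	// the connection is closed if the ping is not answered within PingTimeout.
	t2.ReadIdleTimeout = 30 * time.Second
	t2.PingTimeout = 15 * time.Second

	client := &http.Client{Transport: t1}
	_ = client
}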
@@ -2199,7 +2164,7 @@ type resAndError struct {
 func (cc *ClientConn) addStreamLocked(cs *clientStream) {
 	cs.flow.add(int32(cc.initialWindowSize))
 	cs.flow.setConnFlow(&cc.flow)
-	cs.inflow.init(transportDefaultStreamFlow)
+	cs.inflow.init(cc.initialStreamRecvWindowSize)
 	cs.ID = cc.nextStreamID
 	cc.nextStreamID += 2
 	cc.streams[cs.ID] = cs
@@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
 func (rl *clientConnReadLoop) run() error {
 	cc := rl.cc
 	gotSettings := false
-	readIdleTimeout := cc.t.ReadIdleTimeout
+	readIdleTimeout := cc.readIdleTimeout
 	var t timer
 	if readIdleTimeout != 0 {
 		t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)