From f943701a74ffa21ab44b5e1c7e7399c7a6a0f184 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Wed, 18 Feb 2015 09:27:35 -0500 Subject: [PATCH 1/3] bump(spf13/pflag):370c3171201099fa6b466db45c8a032cbce33d8d --- Godeps/Godeps.json | 2 +- .../src/github.com/spf13/pflag/flag.go | 5 +++ .../src/github.com/spf13/pflag/flag_test.go | 38 ++++++++++++++++++- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 7a1bf0768e8..93635982675 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -235,7 +235,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "f82776d6cc998e3c026baef7b24409ff49fe5c8d" + "Rev": "370c3171201099fa6b466db45c8a032cbce33d8d" }, { "ImportPath": "github.com/stretchr/objx", diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go index 2e4cac08493..ad65ddad2df 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go +++ b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go @@ -543,6 +543,11 @@ func (f *FlagSet) parseArgs(args []string) (err error) { if s[1] == '-' { args, err = f.parseLongArg(s, args) + + if len(s) == 2 { + // stop parsing after -- + break + } } else { args, err = f.parseShortArg(s, args) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go index 47865bd96ba..a33c601bc59 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go +++ b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go @@ -197,8 +197,8 @@ func TestShorthand(t *testing.T) { notaflag, } f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err == nil { - t.Error("--i-look-like-a-flag should throw an error") + if err := f.Parse(args); err != nil { + t.Error("expected no error, got ", err) } if !f.Parsed() { t.Error("f.Parse() = false after Parse") @@ -356,3 +356,37 @@ func TestNoInterspersed(t *testing.T) { t.Fatal("expected interspersed options/non-options to fail") } } + +func TestTermination(t *testing.T) { + f := NewFlagSet("termination", ContinueOnError) + boolFlag := f.BoolP("bool", "l", false, "bool value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + arg1 := "ls" + arg2 := "-l" + args := []string{ + "--", + arg1, + arg2, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Fatal("expected no error; got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag { + t.Error("expected boolFlag=false, got true") + } + if len(f.Args()) != 2 { + t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) + } + if f.Args()[0] != arg1 { + t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) + } + if f.Args()[1] != arg2 { + t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) + } +} From 25d38c175b8d422630af857516db36c767dc9349 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Thu, 8 Jan 2015 15:40:47 -0500 Subject: [PATCH 2/3] Add command execution/port forwarding dependencies code.google.com/p/go.net/spdy github.com/docker/docker/pkg/term github.com/docker/spdystream github.com/kr/pty --- Godeps/Godeps.json | 19 + .../p/go.net/spdy/dictionary.go | 187 ++++ .../src/code.google.com/p/go.net/spdy/read.go | 348 +++++++ .../p/go.net/spdy/spdy_test.go | 644 +++++++++++++ .../code.google.com/p/go.net/spdy/types.go | 275 ++++++ .../code.google.com/p/go.net/spdy/write.go | 318 +++++++ .../docker/docker/pkg/term/MAINTAINERS | 1 + 
.../docker/docker/pkg/term/console_windows.go | 87 ++ .../docker/docker/pkg/term/tc_linux_cgo.go | 47 + .../docker/docker/pkg/term/tc_other.go | 19 + .../github.com/docker/docker/pkg/term/term.go | 103 ++ .../docker/docker/pkg/term/term_windows.go | 89 ++ .../docker/docker/pkg/term/termios_darwin.go | 65 ++ .../docker/docker/pkg/term/termios_freebsd.go | 65 ++ .../docker/docker/pkg/term/termios_linux.go | 46 + .../docker/spdystream/CONTRIBUTING.md | 13 + .../src/github.com/docker/spdystream/LICENSE | 191 ++++ .../github.com/docker/spdystream/MAINTAINERS | 1 + .../github.com/docker/spdystream/README.md | 78 ++ .../docker/spdystream/connection.go | 877 ++++++++++++++++++ .../github.com/docker/spdystream/handlers.go | 38 + .../github.com/docker/spdystream/priority.go | 97 ++ .../docker/spdystream/priority_test.go | 107 +++ .../docker/spdystream/spdy_bench_test.go | 113 +++ .../github.com/docker/spdystream/spdy_test.go | 735 +++++++++++++++ .../github.com/docker/spdystream/stream.go | 328 +++++++ .../src/github.com/docker/spdystream/utils.go | 16 + .../docker/spdystream/ws/connection.go | 65 ++ .../docker/spdystream/ws/ws_test.go | 175 ++++ .../src/github.com/kr/pty/.gitignore | 4 + .../_workspace/src/github.com/kr/pty/License | 23 + .../src/github.com/kr/pty/README.md | 36 + .../_workspace/src/github.com/kr/pty/doc.go | 16 + .../_workspace/src/github.com/kr/pty/ioctl.go | 11 + .../src/github.com/kr/pty/ioctl_bsd.go | 39 + .../src/github.com/kr/pty/mktypes.bash | 19 + .../src/github.com/kr/pty/pty_darwin.go | 60 ++ .../src/github.com/kr/pty/pty_freebsd.go | 73 ++ .../src/github.com/kr/pty/pty_linux.go | 46 + .../src/github.com/kr/pty/pty_unsupported.go | 11 + .../_workspace/src/github.com/kr/pty/run.go | 28 + .../_workspace/src/github.com/kr/pty/types.go | 10 + .../src/github.com/kr/pty/types_freebsd.go | 15 + .../_workspace/src/github.com/kr/pty/util.go | 35 + .../src/github.com/kr/pty/ztypes_386.go | 9 + .../src/github.com/kr/pty/ztypes_amd64.go | 9 + .../src/github.com/kr/pty/ztypes_arm.go | 9 + .../github.com/kr/pty/ztypes_freebsd_386.go | 13 + .../github.com/kr/pty/ztypes_freebsd_amd64.go | 14 + .../github.com/kr/pty/ztypes_freebsd_arm.go | 13 + .../src/github.com/kr/pty/ztypes_ppc64.go | 11 + .../src/github.com/kr/pty/ztypes_ppc64le.go | 11 + .../src/github.com/kr/pty/ztypes_s390x.go | 11 + 53 files changed, 5673 insertions(+) create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/spdy/dictionary.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/spdy/read.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/spdy/spdy_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/spdy/types.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/spdy/write.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/console_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/LICENSE create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/connection.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/handlers.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/priority.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/stream.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go create mode 100644 Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/.gitignore create mode 100644 Godeps/_workspace/src/github.com/kr/pty/License create mode 100644 Godeps/_workspace/src/github.com/kr/pty/README.md create mode 100644 Godeps/_workspace/src/github.com/kr/pty/doc.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ioctl.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/mktypes.bash create mode 100644 Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/pty_linux.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/run.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/types.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/util.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64le.go create mode 100644 Godeps/_workspace/src/github.com/kr/pty/ztypes_s390x.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 93635982675..892eff398ed 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -14,6 +14,11 @@ "Comment": "null-12", "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9" }, + { + "ImportPath": "code.google.com/p/go.net/spdy", + "Comment": "null-240", + "Rev": "937a34c9de13c766c814510f76bca091dee06028" + }, { "ImportPath": "code.google.com/p/google-api-go-client/compute/v1", "Comment": "release-96", @@ -83,6 +88,11 @@ "Comment": "v1.4.1-108-g364720b", "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" 
}, + { + "ImportPath": "github.com/docker/docker/pkg/term", + "Comment": "v1.4.1-108-g364720b", + "Rev": "364720b5e7e725cdc466171de873eefdb8609a33" + }, { "ImportPath": "github.com/docker/docker/pkg/units", "Comment": "v1.4.1-108-g364720b", @@ -93,6 +103,10 @@ "Comment": "v1.4.1-108-g364720b", "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" }, + { + "ImportPath": "github.com/docker/spdystream", + "Rev": "29e1da2890f60336f98d0b3bf28b05070aa2ee4d" + }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", "Rev": "ae4665cf2d188c65764c73fe4af5378acc549510" @@ -145,6 +159,11 @@ "ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext", "Rev": "7a864a042e844af638df17ebbabf8183dace556a" }, + { + "ImportPath": "github.com/kr/pty", + "Comment": "release.r56-25-g05017fc", + "Rev": "05017fcccf23c823bfdea560dcc958a136e54fb7" + }, { "ImportPath": "github.com/miekg/dns", "Rev": "3f504e8dabd5d562e997d19ce0200aa41973e1b2" diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/spdy/dictionary.go b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/dictionary.go new file mode 100644 index 00000000000..5a5ff0e14cd --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. +var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 
0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 
0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/spdy/read.go b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/read.go new file mode 100644 index 00000000000..9359a95015c --- /dev/null +++ 
b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { 
return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := 
f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/spdy/spdy_test.go b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/spdy_test.go new file mode 100644 index 00000000000..ce581f1d056 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/spdy_test.go @@ -0,0 +1,644 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "bytes" + "compress/zlib" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "reflect" + "testing" +) + +var HeadersFixture = http.Header{ + "Url": []string{"http://www.google.com/"}, + "Method": []string{"get"}, + "Version": []string{"http/1.1"}, +} + +func TestHeaderParsing(t *testing.T) { + var headerValueBlockBuf bytes.Buffer + writeHeaderValueBlock(&headerValueBlockBuf, HeadersFixture) + const bogusStreamId = 1 + newHeaders, err := parseHeaderValueBlock(&headerValueBlockBuf, bogusStreamId) + if err != nil { + t.Fatal("parseHeaderValueBlock:", err) + } + if !reflect.DeepEqual(HeadersFixture, newHeaders) { + t.Fatal("got: ", newHeaders, "\nwant: ", HeadersFixture) + } +} + +func TestCreateParseSynStreamFrameCompressionDisable(t *testing.T) { + buffer := new(bytes.Buffer) + // Fixture framer for no compression test. + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestCreateParseSynStreamFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestCreateParseSynReplyFrameCompressionDisable(t *testing.T) { + buffer := new(bytes.Buffer) + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + synReplyFrame := SynReplyFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynReply, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&synReplyFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedSynReplyFrame, ok := frame.(*SynReplyFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { + t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) + } +} + +func TestCreateParseSynReplyFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + synReplyFrame := SynReplyFrame{ + CFHeader: ControlFrameHeader{ + 
version: Version, + frameType: TypeSynReply, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + if err := framer.WriteFrame(&synReplyFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedSynReplyFrame, ok := frame.(*SynReplyFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { + t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) + } +} + +func TestCreateParseRstStream(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + rstStreamFrame := RstStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeRstStream, + }, + StreamId: 1, + Status: InvalidStream, + } + if err := framer.WriteFrame(&rstStreamFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedRstStreamFrame, ok := frame.(*RstStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(rstStreamFrame, *parsedRstStreamFrame) { + t.Fatal("got: ", *parsedRstStreamFrame, "\nwant: ", rstStreamFrame) + } +} + +func TestCreateParseSettings(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + settingsFrame := SettingsFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSettings, + }, + FlagIdValues: []SettingsFlagIdValue{ + {FlagSettingsPersistValue, SettingsCurrentCwnd, 10}, + {FlagSettingsPersisted, SettingsUploadBandwidth, 1}, + }, + } + if err := framer.WriteFrame(&settingsFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedSettingsFrame, ok := frame.(*SettingsFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(settingsFrame, *parsedSettingsFrame) { + t.Fatal("got: ", *parsedSettingsFrame, "\nwant: ", settingsFrame) + } +} + +func TestCreateParsePing(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + pingFrame := PingFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypePing, + }, + Id: 31337, + } + if err := framer.WriteFrame(&pingFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if pingFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", pingFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedPingFrame, ok := frame.(*PingFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedPingFrame.CFHeader.Flags != 0 { + t.Fatal("Parsed incorrect frame type:", parsedPingFrame) + } + if !reflect.DeepEqual(pingFrame, *parsedPingFrame) { + t.Fatal("got: ", *parsedPingFrame, "\nwant: ", pingFrame) + } +} + +func TestCreateParseGoAway(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + goAwayFrame := GoAwayFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeGoAway, + }, + 
LastGoodStreamId: 31337, + Status: 1, + } + if err := framer.WriteFrame(&goAwayFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if goAwayFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", goAwayFrame) + } + if goAwayFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", goAwayFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedGoAwayFrame, ok := frame.(*GoAwayFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedGoAwayFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", parsedGoAwayFrame) + } + if parsedGoAwayFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", parsedGoAwayFrame) + } + if !reflect.DeepEqual(goAwayFrame, *parsedGoAwayFrame) { + t.Fatal("got: ", *parsedGoAwayFrame, "\nwant: ", goAwayFrame) + } +} + +func TestCreateParseHeadersFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + } + headersFrame.Headers = HeadersFixture + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } +} + +func TestCreateParseHeadersFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + } + headersFrame.Headers = HeadersFixture + + framer, err := NewFramer(buffer, buffer) + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } +} + +func TestCreateParseWindowUpdateFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + windowUpdateFrame := WindowUpdateFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeWindowUpdate, + }, + StreamId: 31337, + DeltaWindowSize: 1, + } + if err := framer.WriteFrame(&windowUpdateFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if windowUpdateFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", windowUpdateFrame) + } + if windowUpdateFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", windowUpdateFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedWindowUpdateFrame, ok := frame.(*WindowUpdateFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedWindowUpdateFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame) + } + if parsedWindowUpdateFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame 
type:", parsedWindowUpdateFrame) + } + if !reflect.DeepEqual(windowUpdateFrame, *parsedWindowUpdateFrame) { + t.Fatal("got: ", *parsedWindowUpdateFrame, "\nwant: ", windowUpdateFrame) + } +} + +func TestCreateParseDataFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + dataFrame := DataFrame{ + StreamId: 1, + Data: []byte{'h', 'e', 'l', 'l', 'o'}, + } + if err := framer.WriteFrame(&dataFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedDataFrame, ok := frame.(*DataFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(dataFrame, *parsedDataFrame) { + t.Fatal("got: ", *parsedDataFrame, "\nwant: ", dataFrame) + } +} + +func TestCompressionContextAcrossFrames(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame (HEADERS):", err) + } + synStreamFrame := SynStreamFrame{ + ControlFrameHeader{ + Version, + TypeSynStream, + 0, // Flags + 0, // length + }, + 2, // StreamId + 0, // AssociatedTOStreamID + 0, // Priority + 1, // Slot + nil, // Headers + } + synStreamFrame.Headers = HeadersFixture + + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame (SYN_STREAM):", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (HEADERS):", err, buffer.Bytes()) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatalf("expected HeadersFrame; got %T %v", frame, frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } + frame, err = framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (SYN_STREAM):", err, buffer.Bytes()) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatalf("expected SynStreamFrame; got %T %v", frame, frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestMultipleSPDYFrames(t *testing.T) { + // Initialize the framers. + pr1, pw1 := io.Pipe() + pr2, pw2 := io.Pipe() + writer, err := NewFramer(pw1, pr2) + if err != nil { + t.Fatal("Failed to create writer:", err) + } + reader, err := NewFramer(pw2, pr1) + if err != nil { + t.Fatal("Failed to create reader:", err) + } + + // Set up the frames we're actually transferring. + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + Headers: HeadersFixture, + } + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + + // Start the goroutines to write the frames. + go func() { + if err := writer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame (HEADERS): ", err) + } + if err := writer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame (SYN_STREAM): ", err) + } + }() + + // Read the frames and verify they look as expected. 
+ frame, err := reader.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (HEADERS): ", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } + frame, err = reader.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (SYN_STREAM):", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type.") + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestReadMalformedZlibHeader(t *testing.T) { + // These were constructed by corrupting the first byte of the zlib + // header after writing. + malformedStructs := map[string]string{ + "SynStreamFrame": "gAIAAQAAABgAAAACAAAAAAAAF/nfolGyYmAAAAAA//8=", + "SynReplyFrame": "gAIAAgAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", + "HeadersFrame": "gAIACAAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", + } + for name, bad := range malformedStructs { + b, err := base64.StdEncoding.DecodeString(bad) + if err != nil { + t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err) + } + buf := bytes.NewBuffer(b) + reader, err := NewFramer(buf, buf) + if err != nil { + t.Fatalf("NewFramer: %v", err) + } + _, err = reader.ReadFrame() + if err != zlib.ErrHeader { + t.Errorf("Frame %s, expected: %#v, actual: %#v", name, zlib.ErrHeader, err) + } + } +} + +// TODO: these tests are too weak for updating SPDY spec. Fix me. + +type zeroStream struct { + frame Frame + encoded string +} + +var streamIdZeroFrames = map[string]zeroStream{ + "SynStreamFrame": { + &SynStreamFrame{StreamId: 0}, + "gAIAAQAAABgAAAAAAAAAAAAAePnfolGyYmAAAAAA//8=", + }, + "SynReplyFrame": { + &SynReplyFrame{StreamId: 0}, + "gAIAAgAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", + }, + "RstStreamFrame": { + &RstStreamFrame{StreamId: 0}, + "gAIAAwAAAAgAAAAAAAAAAA==", + }, + "HeadersFrame": { + &HeadersFrame{StreamId: 0}, + "gAIACAAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", + }, + "DataFrame": { + &DataFrame{StreamId: 0}, + "AAAAAAAAAAA=", + }, + "PingFrame": { + &PingFrame{Id: 0}, + "gAIABgAAAAQAAAAA", + }, +} + +func TestNoZeroStreamId(t *testing.T) { + t.Log("skipping") // TODO: update to work with SPDY3 + return + + for name, f := range streamIdZeroFrames { + b, err := base64.StdEncoding.DecodeString(f.encoded) + if err != nil { + t.Errorf("Unable to decode base64 encoded frame %s: %v", f, err) + continue + } + framer, err := NewFramer(ioutil.Discard, bytes.NewReader(b)) + if err != nil { + t.Fatalf("NewFramer: %v", err) + } + err = framer.WriteFrame(f.frame) + checkZeroStreamId(t, name, "WriteFrame", err) + + _, err = framer.ReadFrame() + checkZeroStreamId(t, name, "ReadFrame", err) + } +} + +func checkZeroStreamId(t *testing.T, frame string, method string, err error) { + if err == nil { + t.Errorf("%s ZeroStreamId, no error on %s", method, frame) + return + } + eerr, ok := err.(*Error) + if !ok || eerr.Err != ZeroStreamId { + t.Errorf("%s ZeroStreamId, incorrect error %#v, frame %s", method, eerr, frame) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/spdy/types.go b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/types.go new file mode 100644 index 00000000000..7b6ee9c6f2b --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. +type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply = 0x0002 + TypeRstStream = 0x0003 + TypeSettings = 0x0004 + TypePing = 0x0006 + TypeGoAway = 0x0007 + TypeHeaders = 0x0008 + TypeWindowUpdate = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional = 0x02 + ControlFlagSettingsClearSettings = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSepator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. + version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. +type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted = 0x2 +) + +// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. 
+type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. +type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders = "multiple headers with same name" + WrongCompressedPayloadSize = "compressed payload size was incorrect" + UnknownFrameType = "unknown frame type" + InvalidControlFrame = "invalid control frame" + InvalidDataFrame = "invalid data frame" + InvalidHeaderPresent = "frame contained invalid header" + ZeroStreamId = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. 
+type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. +func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/spdy/write.go b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/write.go new file mode 100644 index 00000000000..b212f66a235 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS new file mode 100644 index 00000000000..aee10c84210 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS @@ -0,0 +1 @@ +Solomon Hykes (@shykes) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/console_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/console_windows.go new file mode 100644 index 00000000000..6335b2b837f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/console_windows.go @@ -0,0 +1,87 @@ +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + // Consts for Get/SetConsoleMode function + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_WINDOW_INPUT = 0x0008 + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") +) + +func GetConsoleMode(fileDesc uintptr) (uint32, error) { + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode) + return mode, err +} + +func SetConsoleMode(fileDesc uintptr, mode uint32) error { + r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0) + if r == 0 { + if err != nil { + return err + } + return syscall.EINVAL + } + return nil +} + +// types for calling GetConsoleScreenBufferInfo +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + WORD uint16 + + CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize COORD + dwCursorPosition COORD + wAttributes WORD + srWindow SMALL_RECT + dwMaximumWindowSize COORD + } +) + +func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + var info CONSOLE_SCREEN_BUFFER_INFO + r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0) + if r == 0 { + if err != nil { + return nil, err + } + return nil, syscall.EINVAL + } + return &info, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go new file mode 100644 index 00000000000..ae9516c99cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -0,0 +1,47 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so 
that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go new file mode 100644 index 00000000000..266039bac3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go @@ -0,0 +1,19 @@ +// +build !windows +// +build !linux !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 00000000000..8d807d8d446 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,103 @@ +// +build !windows + +package term + +import ( + "errors" + "os" + "os/signal" + "syscall" + "unsafe" +) + +var ( + ErrInvalidState = errors.New("Invalid terminal state") +) + +type State struct { + termios Termios +} + +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return nil + } + return err +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. 
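+//
+// Illustrative sketch, not part of the original source: RestoreTerminal is
+// typically paired with SetRawTerminal or SaveState. os.Stdin is an
+// assumption here; any terminal file descriptor works:
+//
+//	state, err := term.SetRawTerminal(os.Stdin.Fd())
+//	if err != nil {
+//		// handle error
+//	}
+//	defer term.RestoreTerminal(os.Stdin.Fd(), state)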
+func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 00000000000..d372e86a884 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,89 @@ +// +build windows + +package term + +type State struct { + mode uint32 +} + +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + var info *CONSOLE_SCREEN_BUFFER_INFO + info, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + ws.Height = uint16(info.srWindow.Right - info.srWindow.Left + 1) + ws.Width = uint16(info.srWindow.Bottom - info.srWindow.Top + 1) + + ws.x = 0 // todo azlinux -- this is the pixel size of the Window, and not currently used by any caller + ws.y = 0 + + return ws, nil +} + +func SetWinsize(fd uintptr, ws *Winsize) error { + return nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return SetConsoleMode(fd, state.mode) +} + +func SaveState(fd uintptr) (*State, error) { + mode, e := GetConsoleMode(fd) + if e != nil { + return nil, e + } + return &State{mode}, nil +} + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings +func DisableEcho(fd uintptr, state *State) error { + state.mode &^= (ENABLE_ECHO_INPUT) + state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + return SetConsoleMode(fd, state.mode) +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + // TODO (azlinux): implement handling interrupt and restore state of terminal + return oldState, err +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var state *State + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings + state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + err = SetConsoleMode(fd, state.mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go new file mode 100644 index 00000000000..11cd70d10b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go @@ -0,0 +1,65 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA + + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go new file mode 100644 index 00000000000..ed3659572cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go @@ -0,0 +1,65 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA + + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// 
restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 00000000000..024187ff066 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,46 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md new file mode 100644 index 00000000000..d4eddcc5396 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE b/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE new file mode 100644 index 00000000000..27448585ad4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS new file mode 100644 index 00000000000..4eb44dcf437 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS @@ -0,0 +1 @@ +Derek McGowan (@dmcg) diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/README.md b/Godeps/_workspace/src/github.com/docker/spdystream/README.md new file mode 100644 index 00000000000..076b17919c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/README.md @@ -0,0 +1,78 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/docker/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/docker/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Code and documentation copyright 2013-2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. 
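+
+## Pings, idle timeouts and shutdown
+
+Beyond the basic client/server flow above, `Connection` also exposes `Ping`,
+`SetIdleTimeout`, and `CloseWait` (see `connection.go` in this package). The
+sketch below is illustrative only and is not part of the upstream
+documentation; the address and the 30-second timeout are assumptions.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/docker/spdystream"
+)
+
+func main() {
+	conn, err := net.Dial("tcp", "localhost:8080")
+	if err != nil {
+		panic(err)
+	}
+	spdyConn, err := spdystream.NewConnection(conn, false)
+	if err != nil {
+		panic(err)
+	}
+	go spdyConn.Serve(spdystream.NoOpStreamHandler)
+
+	// Terminate the connection if it sits idle for 30 seconds.
+	spdyConn.SetIdleTimeout(30 * time.Second)
+
+	// Measure round-trip time with a PING frame.
+	rtt, err := spdyConn.Ping()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("ping rtt:", rtt)
+
+	// Close sends a GOAWAY frame; CloseWait also waits for shutdown to finish.
+	if err := spdyConn.CloseWait(); err != nil {
+		panic(err)
+	}
+}
+```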
diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/connection.go b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go new file mode 100644 index 00000000000..3f937e4cb23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go @@ -0,0 +1,877 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "code.google.com/p/go.net/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occured") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + resetChan chan struct{} + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + setTimeoutChan: make(chan time.Duration), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-i.resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + for _, stream := range i.conn.streams { + stream.Reset() + } + i.conn.Close() + break Loop + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + break Loop + } + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + i.resetChan <- struct{}{} + + return frame, nil +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + writeLock sync.Mutex + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool +} + +// NewConnection creates a new spdy connection from an existing +// network connection. 
+func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + s.writeLock.Lock() + writeErr := s.framer.WriteFrame(frame) + s.writeLock.Unlock() + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. 
+func (s *Connection) Serve(newHandler StreamHandler) { + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + go s.frameHandler(frameQueues[i], newHandler) + } + + var partitionRoundRobin int + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + fmt.Errorf("frame read error: %s", err) + } else { + debugMessage("EOF received") + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.handleDataFrame(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + fmt.Errorf("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + 
var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + fmt.Errorf("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) + select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Data frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("Data frame not replied %d", frame.StreamId) + // No reply received...Protocol error? 
+ return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + s.writeLock.Lock() + defer s.writeLock.Unlock() + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. 
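+//
+// Illustrative sketch, not part of the original source (spdyConn is assumed
+// to be a *Connection returned by NewConnection):
+//
+//	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	stream.Wait() // block until the reply frame arrives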
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + fmt.Errorf("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) + + return +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + s.writeLock.Lock() + err := s.framer.WriteFrame(goAwayFrame) + s.writeLock.Unlock() + if err != nil { + return err + } + + go s.shutdown(s.closeTimeout) + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. +func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. 
The last stream to be +// received by the remote will be sent on the channel. The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setTimeoutChan <- timeout +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + s.writeLock.Lock() + defer s.writeLock.Unlock() + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + s.writeLock.Lock() + defer s.writeLock.Unlock() + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + s.writeLock.Lock() + defer s.writeLock.Unlock() + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + s.writeLock.Lock() + defer s.writeLock.Unlock() + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: 
%d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("Stream removed, broadcasting: %d", stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go b/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go new file mode 100644 index 00000000000..b59fa5fdcd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go @@ -0,0 +1,38 @@ +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects, most +// likely used with RejectAuthHandler which will not allow any +// streams to make it to the stream handler. 
+func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/priority.go b/Godeps/_workspace/src/github.com/docker/spdystream/priority.go new file mode 100644 index 00000000000..abc3291bc75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/priority.go @@ -0,0 +1,97 @@ +package spdystream + +import ( + "code.google.com/p/go.net/spdy" + "container/heap" + "sync" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q *PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go new file mode 100644 index 00000000000..38cf225e1d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go @@ -0,0 +1,107 @@ +package spdystream + +import ( + "code.google.com/p/go.net/spdy" + "sync" + "testing" + "time" +) + +func TestPriorityQueueOrdering(t *testing.T) { + queue := NewPriorityFrameQueue(150) + data1 := &spdy.DataFrame{} + data2 := &spdy.DataFrame{} + data3 := &spdy.DataFrame{} + data4 := &spdy.DataFrame{} + queue.Push(data1, 2) + queue.Push(data2, 1) + queue.Push(data3, 1) + queue.Push(data4, 0) + + if queue.Pop() != data4 { + t.Fatalf("Wrong order, expected data4 first") + } + if queue.Pop() != data2 { + t.Fatalf("Wrong order, expected data2 second") + } + if queue.Pop() != data3 { + t.Fatalf("Wrong order, expected data3 third") + } + if queue.Pop() != data1 { + t.Fatalf("Wrong order, expected data1 fourth") + } + + // Insert 50 Medium priority frames + for i := spdy.StreamId(50); i < 100; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, 1) + } + // Insert 50 low priority frames + for i := spdy.StreamId(100); i < 150; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, 2) + } + // Insert 50 high priority frames + for i := spdy.StreamId(0); i < 50; i++ { + 
queue.Push(&spdy.DataFrame{StreamId: i}, 0) + } + + for i := spdy.StreamId(0); i < 150; i++ { + frame := queue.Pop() + if frame.(*spdy.DataFrame).StreamId != i { + t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) + } + } +} + +func TestPriorityQueueSync(t *testing.T) { + queue := NewPriorityFrameQueue(150) + var wg sync.WaitGroup + insertRange := func(start, stop spdy.StreamId, priority uint8) { + for i := start; i < stop; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, priority) + } + wg.Done() + } + wg.Add(3) + go insertRange(spdy.StreamId(100), spdy.StreamId(150), 2) + go insertRange(spdy.StreamId(0), spdy.StreamId(50), 0) + go insertRange(spdy.StreamId(50), spdy.StreamId(100), 1) + + wg.Wait() + for i := spdy.StreamId(0); i < 150; i++ { + frame := queue.Pop() + if frame.(*spdy.DataFrame).StreamId != i { + t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) + } + } +} + +func TestPriorityQueueBlocking(t *testing.T) { + queue := NewPriorityFrameQueue(15) + for i := 0; i < 15; i++ { + queue.Push(&spdy.DataFrame{}, 2) + } + doneChan := make(chan bool) + go func() { + queue.Push(&spdy.DataFrame{}, 2) + close(doneChan) + }() + select { + case <-doneChan: + t.Fatalf("Push succeeded, expected to block") + case <-time.After(time.Millisecond): + break + } + + queue.Pop() + + select { + case <-doneChan: + break + case <-time.After(time.Millisecond): + t.Fatalf("Push should have succeeded, but timeout reached") + } + + for i := 0; i < 15; i++ { + queue.Pop() + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go new file mode 100644 index 00000000000..6f9e4910151 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go @@ -0,0 +1,113 @@ +package spdystream + +import ( + "fmt" + "io" + "net" + "net/http" + "sync" + "testing" +) + +func configureServer() (io.Closer, string, *sync.WaitGroup) { + authenticated = true + wg := &sync.WaitGroup{} + server, listen, serverErr := runServer(wg) + + if serverErr != nil { + panic(serverErr) + } + + return server, listen, wg +} + +func BenchmarkDial10000(b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + panic(fmt.Sprintf("Error dialing server: %s", dialErr)) + } + conn.Close() + } +} + +func BenchmarkDialWithSPDYStream10000(b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + b.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + b.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + closeErr := spdyConn.Close() + if closeErr != nil { + b.Fatalf("Error closing connection: %s, closeErr") + } + } +} + +func benchmarkStreamWithDataAndSize(size uint64, b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + b.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + b.Fatalf("Error creating spdy connection: %s", spdyErr) + 
} + + go spdyConn.Serve(MirrorStreamHandler) + + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + + writer := make([]byte, size) + + stream.Write(writer) + + if err != nil { + panic(err) + } + + reader := make([]byte, size) + stream.Read(reader) + + stream.Close() + + closeErr := spdyConn.Close() + if closeErr != nil { + b.Fatalf("Error closing connection: %s, closeErr") + } + } +} + +func BenchmarkStreamWith1Byte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1, b) } +func BenchmarkStreamWith1KiloByte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024, b) } +func BenchmarkStreamWith1Megabyte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024*1024, b) } diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go new file mode 100644 index 00000000000..715654d4a50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go @@ -0,0 +1,735 @@ +package spdystream + +import ( + "bytes" + "io" + "net" + "net/http" + "sync" + "testing" + "time" +) + +func TestSpdyStreams(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + message := []byte("hello") + writeErr := stream.WriteData(message, false) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + buf := make([]byte, 10) + n, readErr := stream.Read(buf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 5 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n) + } + if bytes.Compare(buf[:n], message) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", buf, message) + } + + headers := http.Header{ + "TestKey": []string{"TestVal"}, + } + sendErr := stream.SendHeader(headers, false) + if sendErr != nil { + t.Fatalf("Error sending headers: %s", sendErr) + } + receiveHeaders, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + t.Fatalf("Error receiving headers: %s", receiveErr) + } + if len(receiveHeaders) != 1 { + t.Fatalf("Unexpected number of headers:\nActual: %d\nExpecting:%d", len(receiveHeaders), 1) + } + testVal := receiveHeaders.Get("TestKey") + if testVal != "TestVal" { + t.Fatalf("Wrong test value:\nActual: %q\nExpecting: %q", testVal, "TestVal") + } + + writeErr = stream.WriteData(message, true) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + smallBuf := make([]byte, 3) + n, readErr = stream.Read(smallBuf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 3 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n) + } + if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", smallBuf[:n], message) + } + n, readErr = stream.Read(smallBuf) + if readErr != nil { 
+ t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 2 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n) + } + if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n]) + } + + n, readErr = stream.Read(buf) + if readErr != io.EOF { + t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n) + } + + // Closing again should return error since stream is already closed + streamCloseErr := stream.Close() + if streamCloseErr == nil { + t.Fatalf("No error closing finished stream") + } + if streamCloseErr != ErrWriteClosedStream { + t.Fatalf("Unexpected error closing stream: %s", streamCloseErr) + } + + streamResetErr := stream.Reset() + if streamResetErr != nil { + t.Fatalf("Error reseting stream: %s", streamResetErr) + } + + authenticated = false + badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if badStreamErr != nil { + t.Fatalf("Error creating stream: %s", badStreamErr) + } + + waitErr = badStream.Wait() + if waitErr == nil { + t.Fatalf("Did not receive error creating stream") + } + if waitErr != ErrReset { + t.Fatalf("Unexpected error creating stream: %s", waitErr) + } + streamCloseErr = badStream.Close() + if streamCloseErr == nil { + t.Fatalf("No error closing bad stream") + } + + spdyCloseErr := spdyConn.Close() + if spdyCloseErr != nil { + t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestPing(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + pingTime, pingErr := spdyConn.Ping() + if pingErr != nil { + t.Fatalf("Error pinging server: %s", pingErr) + } + if pingTime == time.Duration(0) { + t.Fatalf("Expecting non-zero ping time") + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestHalfClose(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + message := []byte("hello and will read after close") + writeErr := stream.WriteData(message, false) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + streamCloseErr := stream.Close() + if streamCloseErr != nil { + t.Fatalf("Error closing stream: %s", streamCloseErr) + } + + buf := make([]byte, 40) + n, readErr := stream.Read(buf) + if 
readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 31 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n) + } + if bytes.Compare(buf[:n], message) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", buf, message) + } + + spdyCloseErr := spdyConn.Close() + if spdyCloseErr != nil { + t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestUnexpectedRemoteConnectionClosed(t *testing.T) { + tt := []struct { + closeReceiver bool + closeSender bool + }{ + {closeReceiver: true, closeSender: false}, + {closeReceiver: false, closeSender: true}, + {closeReceiver: false, closeSender: false}, + } + for tix, tc := range tt { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + t.Fatalf("Error listening: %v", listenErr) + } + + var serverConn net.Conn + var connErr error + go func() { + serverConn, connErr = listener.Accept() + if connErr != nil { + t.Fatalf("Error accepting: %v", connErr) + } + + serverSpdyConn, _ := NewConnection(serverConn, true) + go serverSpdyConn.Serve(func(stream *Stream) { + stream.SendReply(http.Header{}, tc.closeSender) + }) + }() + + conn, dialErr := net.Dial("tcp", listener.Addr().String()) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + if tc.closeReceiver { + // make stream half closed, receive only + stream.Close() + } + + streamch := make(chan error, 1) + go func() { + b := make([]byte, 1) + _, err := stream.Read(b) + streamch <- err + }() + + closeErr := serverConn.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + + select { + case e := <-streamch: + if e == nil || e != io.EOF { + t.Fatalf("(%d) Expected to get an EOF stream error", tix) + } + case <-time.After(500 * time.Millisecond): + t.Fatalf("(%d) Timeout waiting for stream closure", tix) + } + + closeErr = conn.Close() + if closeErr != nil { + t.Fatalf("Error closing client connection: %s", closeErr) + } + + listenErr = listener.Close() + if listenErr != nil { + t.Fatalf("Error closing listener: %s", listenErr) + } + } +} + +func TestCloseNotification(t *testing.T) { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + t.Fatalf("Error listening: %v", listenErr) + } + listen := listener.Addr().String() + + serverConnChan := make(chan net.Conn) + go func() { + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Error accepting: %v", err) + } + + serverSpdyConn, err := NewConnection(serverConn, true) + if err != nil { + t.Fatalf("Error creating server connection: %v", err) + } + go serverSpdyConn.Serve(NoOpStreamHandler) + <-serverSpdyConn.CloseChan() + serverConnChan <- serverConn + }() + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + 
t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + // close client conn + err := conn.Close() + if err != nil { + t.Fatalf("Error closing client connection: %v", err) + } + + var serverConn net.Conn + select { + case serverConn = <-serverConnChan: + case <-time.After(500 * time.Millisecond): + t.Fatal("Timed out waiting for connection closed notification") + } + + err = serverConn.Close() + if err != nil { + t.Fatalf("Error closing serverConn: %v", err) + } + + listenErr = listener.Close() + if listenErr != nil { + t.Fatalf("Error closing listener: %s", listenErr) + } +} + +func TestIdleNoTimeoutSet(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + select { + case <-spdyConn.CloseChan(): + t.Fatal("Unexpected connection closure") + case <-time.After(10 * time.Millisecond): + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleClearTimeout(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(10 * time.Millisecond) + spdyConn.SetIdleTimeout(0) + select { + case <-spdyConn.CloseChan(): + t.Fatal("Unexpected connection closure") + case <-time.After(20 * time.Millisecond): + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleNoData(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(10 * time.Millisecond) + select { + case <-spdyConn.CloseChan(): + case <-time.After(20 * time.Millisecond): + t.Fatal("Timed out waiting for idle connection closure") + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleWithData(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(25 * 
time.Millisecond) + + authenticated = true + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + writeCh := make(chan struct{}) + + go func() { + b := []byte{1, 2, 3, 4, 5} + for i := 0; i < 10; i++ { + _, err = stream.Write(b) + if err != nil { + t.Fatalf("Error writing to stream: %v", err) + } + time.Sleep(10 * time.Millisecond) + } + close(writeCh) + }() + + writesFinished := false + + expired := time.NewTimer(200 * time.Millisecond) + +Loop: + for { + select { + case <-writeCh: + writesFinished = true + case <-spdyConn.CloseChan(): + if !writesFinished { + t.Fatal("Connection closed before all writes finished") + } + break Loop + case <-expired.C: + t.Fatal("Timed out waiting for idle connection closure") + } + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestHalfClosedIdleTimeout(t *testing.T) { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + t.Fatalf("Error listening: %v", listenErr) + } + listen := listener.Addr().String() + + go func() { + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Error accepting: %v", err) + } + + serverSpdyConn, err := NewConnection(serverConn, true) + if err != nil { + t.Fatalf("Error creating server connection: %v", err) + } + go serverSpdyConn.Serve(func(s *Stream) { + s.SendReply(http.Header{}, true) + }) + serverSpdyConn.SetIdleTimeout(10 * time.Millisecond) + }() + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + time.Sleep(20 * time.Millisecond) + + stream.Reset() + + err = spdyConn.Close() + if err != nil { + t.Fatalf("Error closing client spdy conn: %v", err) + } +} + +func TestStreamReset(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + buf := []byte("dskjahfkdusahfkdsahfkdsafdkas") + for i := 0; i < 10; i++ { + if _, err := stream.Write(buf); err != nil { + t.Fatalf("Error writing to stream: %s", err) + } + } + for i := 0; i < 10; i++ { + if _, err := stream.Read(buf); err != nil { + t.Fatalf("Error reading from stream: %s", err) + } + } + + // fmt.Printf("Resetting...\n") + if err := stream.Reset(); err != nil { + t.Fatalf("Error reseting stream: %s", err) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestStreamResetWithDataRemaining(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error 
initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + buf := []byte("dskjahfkdusahfkdsahfkdsafdkas") + for i := 0; i < 10; i++ { + if _, err := stream.Write(buf); err != nil { + t.Fatalf("Error writing to stream: %s", err) + } + } + + // read a bit to make sure a goroutine gets to <-dataChan + if _, err := stream.Read(buf); err != nil { + t.Fatalf("Error reading from stream: %s", err) + } + + // fmt.Printf("Resetting...\n") + if err := stream.Reset(); err != nil { + t.Fatalf("Error reseting stream: %s", err) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +var authenticated bool + +func authStreamHandler(stream *Stream) { + if !authenticated { + stream.Refuse() + } + MirrorStreamHandler(stream) +} + +func runServer(wg *sync.WaitGroup) (io.Closer, string, error) { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + return nil, "", listenErr + } + wg.Add(1) + go func() { + for { + conn, connErr := listener.Accept() + if connErr != nil { + break + } + + spdyConn, _ := NewConnection(conn, true) + go spdyConn.Serve(authStreamHandler) + + } + wg.Done() + }() + return listener, listener.Addr().String(), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/stream.go b/Godeps/_workspace/src/github.com/docker/spdystream/stream.go new file mode 100644 index 00000000000..8ad700ed8a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/stream.go @@ -0,0 +1,328 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "code.google.com/p/go.net/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + s.conn.writeLock.Lock() + defer s.conn.writeLock.Unlock() + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. 
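An illustrative aside: because Write wraps WriteData and Read drains received frames incrementally, a Stream can be used wherever an io.ReadWriter is expected. The helper below is a sketch only; the package name streamutil is hypothetical, and the stream is assumed to come from Connection.CreateStream as in the tests in this patch.

```go
package streamutil

import (
	"io"

	"github.com/docker/spdystream"
)

// copyThrough treats the stream as a plain io.ReadWriter: each Write issued
// by io.Copy becomes one data frame, Close sends an empty frame with the fin
// flag set, and reads continue until the remote side finishes (io.EOF).
func copyThrough(stream *spdystream.Stream, in io.Reader, out io.Writer) error {
	if _, err := io.Copy(stream, in); err != nil {
		return err
	}
	if err := stream.Close(); err != nil {
		return err
	}
	_, err := io.Copy(out, stream)
	return err
}
```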
+func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. +func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. +func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + s.closeRemoteChannels() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + s.conn.writeLock.Lock() + defer s.conn.writeLock.Unlock() + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. 
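An illustrative aside: the 0 (highest) through 7 (lowest) range documented for SetPriority is the same ordering the PriorityFrameQueue added earlier in this patch applies, draining lower values first and preserving insertion order within a priority level. A standalone sketch of that contract, assuming the vendored import paths shown above:

```go
package main

import (
	"fmt"

	"code.google.com/p/go.net/spdy"
	"github.com/docker/spdystream"
)

func main() {
	// Lower values drain first; equal priorities keep insertion order.
	q := spdystream.NewPriorityFrameQueue(8)
	q.Push(&spdy.DataFrame{StreamId: 1}, 7)
	q.Push(&spdy.DataFrame{StreamId: 3}, 0)
	q.Push(&spdy.DataFrame{StreamId: 5}, 0)

	for i := 0; i < 3; i++ {
		frame := q.Pop().(*spdy.DataFrame)
		fmt.Println(frame.StreamId) // prints 3, 5, then 1
	}
}
```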
+func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. +func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + s.dataLock.Lock() + defer s.dataLock.Unlock() + close(s.dataChan) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/utils.go b/Godeps/_workspace/src/github.com/docker/spdystream/utils.go new file mode 100644 index 00000000000..1b2c199a402 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + 
"log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go b/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go new file mode 100644 index 00000000000..d0ea001b454 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go @@ -0,0 +1,65 @@ +package ws + +import ( + "github.com/gorilla/websocket" + "io" + "log" + "time" +) + +// Wrap an HTTP2 connection over WebSockets and +// use the underlying WebSocket framing for proxy +// compatibility. +type Conn struct { + *websocket.Conn + reader io.Reader +} + +func NewConnection(w *websocket.Conn) *Conn { + return &Conn{Conn: w} +} + +func (c Conn) Write(b []byte) (int, error) { + err := c.WriteMessage(websocket.BinaryMessage, b) + if err != nil { + return 0, err + } + return len(b), nil +} + +func (c Conn) Read(b []byte) (int, error) { + if c.reader == nil { + t, r, err := c.NextReader() + if err != nil { + return 0, err + } + if t != websocket.BinaryMessage { + log.Printf("ws: ignored non-binary message in stream") + return 0, nil + } + c.reader = r + } + n, err := c.reader.Read(b) + if err != nil { + if err == io.EOF { + c.reader = nil + } + return n, err + } + return n, nil +} + +func (c Conn) SetDeadline(t time.Time) error { + if err := c.Conn.SetReadDeadline(t); err != nil { + return err + } + if err := c.Conn.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +func (c Conn) Close() error { + err := c.Conn.Close() + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go new file mode 100644 index 00000000000..36c4a46ac84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go @@ -0,0 +1,175 @@ +package ws + +import ( + "bytes" + "github.com/docker/spdystream" + "github.com/gorilla/websocket" + "io" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +var serverSpdyConn *spdystream.Connection + +// Connect to the Websocket endpoint at ws://localhost +// using SPDY over Websockets framing. 
+func ExampleConn() { + wsconn, _, _ := websocket.DefaultDialer.Dial("ws://localhost/", http.Header{"Origin": {"http://localhost/"}}) + conn, _ := spdystream.NewConnection(NewConnection(wsconn), false) + go conn.Serve(spdystream.NoOpStreamHandler, spdystream.NoAuthHandler) + stream, _ := conn.CreateStream(http.Header{}, nil, false) + stream.Wait() +} + +func serveWs(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + if _, ok := err.(websocket.HandshakeError); !ok { + log.Println(err) + } + return + } + + wrap := NewConnection(ws) + spdyConn, err := spdystream.NewConnection(wrap, true) + if err != nil { + log.Fatal(err) + return + } + serverSpdyConn = spdyConn + go spdyConn.Serve(spdystream.MirrorStreamHandler, authStreamHandler) +} + +func TestSpdyStreamOverWs(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(serveWs)) + defer server.Close() + defer func() { + if serverSpdyConn != nil { + serverSpdyConn.Close() + } + }() + + wsconn, _, err := websocket.DefaultDialer.Dial(strings.Replace(server.URL, "http://", "ws://", 1), http.Header{"Origin": {server.URL}}) + if err != nil { + t.Fatal(err) + } + + wrap := NewConnection(wsconn) + spdyConn, err := spdystream.NewConnection(wrap, false) + if err != nil { + defer wsconn.Close() + t.Fatal(err) + } + defer spdyConn.Close() + authenticated = true + go spdyConn.Serve(spdystream.NoOpStreamHandler, spdystream.RejectAuthHandler) + + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + message := []byte("hello") + writeErr := stream.WriteData(message, false) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + buf := make([]byte, 10) + n, readErr := stream.Read(buf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 5 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n) + } + if bytes.Compare(buf[:n], message) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", buf, message) + } + + writeErr = stream.WriteData(message, true) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + smallBuf := make([]byte, 3) + n, readErr = stream.Read(smallBuf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 3 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n) + } + if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", smallBuf[:n], message) + } + n, readErr = stream.Read(smallBuf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 2 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n) + } + if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n]) + } + + n, readErr = stream.Read(buf) + if readErr != io.EOF { + t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n) + } + + streamCloseErr := stream.Close() + if streamCloseErr != nil { + t.Fatalf("Error closing stream: %s", streamCloseErr) + } + + // Closing again should return nil + streamCloseErr = stream.Close() + if streamCloseErr != nil { + 
t.Fatalf("Error closing stream: %s", streamCloseErr) + } + + authenticated = false + badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if badStreamErr != nil { + t.Fatalf("Error creating stream: %s", badStreamErr) + } + + waitErr = badStream.Wait() + if waitErr == nil { + t.Fatalf("Did not receive error creating stream") + } + if waitErr != spdystream.ErrReset { + t.Fatalf("Unexpected error creating stream: %s", waitErr) + } + + spdyCloseErr := spdyConn.Close() + if spdyCloseErr != nil { + t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) + } +} + +var authenticated bool + +func authStreamHandler(header http.Header, slot uint8, parent uint32) bool { + return authenticated +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/.gitignore b/Godeps/_workspace/src/github.com/kr/pty/.gitignore new file mode 100644 index 00000000000..1f0a99f2f2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/Godeps/_workspace/src/github.com/kr/pty/License b/Godeps/_workspace/src/github.com/kr/pty/License new file mode 100644 index 00000000000..6b7558b6b42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/License @@ -0,0 +1,23 @@ +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/kr/pty/README.md b/Godeps/_workspace/src/github.com/kr/pty/README.md new file mode 100644 index 00000000000..7b7900c3aed --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/README.md @@ -0,0 +1,36 @@ +# pty + +Pty is a Go package for using unix pseudo-terminals. + +## Install + + go get github.com/kr/pty + +## Example + +```go +package main + +import ( + "github.com/kr/pty" + "io" + "os" + "os/exec" +) + +func main() { + c := exec.Command("grep", "--color=auto", "bar") + f, err := pty.Start(c) + if err != nil { + panic(err) + } + + go func() { + f.Write([]byte("foo\n")) + f.Write([]byte("bar\n")) + f.Write([]byte("baz\n")) + f.Write([]byte{4}) // EOT + }() + io.Copy(os.Stdout, f) +} +``` diff --git a/Godeps/_workspace/src/github.com/kr/pty/doc.go b/Godeps/_workspace/src/github.com/kr/pty/doc.go new file mode 100644 index 00000000000..190cfbea929 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/doc.go @@ -0,0 +1,16 @@ +// Package pty provides functions for working with Unix terminals. +package pty + +import ( + "errors" + "os" +) + +// ErrUnsupported is returned if a function is not +// available on the current platform. 
+var ErrUnsupported = errors.New("unsupported") + +// Opens a pty and its corresponding tty. +func Open() (pty, tty *os.File, err error) { + return open() +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ioctl.go b/Godeps/_workspace/src/github.com/kr/pty/ioctl.go new file mode 100644 index 00000000000..5b856e8711d --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ioctl.go @@ -0,0 +1,11 @@ +package pty + +import "syscall" + +func ioctl(fd, cmd, ptr uintptr) error { + _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) + if e != 0 { + return e + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go b/Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go new file mode 100644 index 00000000000..73b12c53cf4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go @@ -0,0 +1,39 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package pty + +// from +const ( + _IOC_VOID uintptr = 0x20000000 + _IOC_OUT uintptr = 0x40000000 + _IOC_IN uintptr = 0x80000000 + _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN + _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN + + _IOC_PARAM_SHIFT = 13 + _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1 +) + +func _IOC_PARM_LEN(ioctl uintptr) uintptr { + return (ioctl >> 16) & _IOC_PARAM_MASK +} + +func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num +} + +func _IO(group byte, ioctl_num uintptr) uintptr { + return _IOC(_IOC_VOID, group, ioctl_num, 0) +} + +func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_OUT, group, ioctl_num, param_len) +} + +func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN, group, ioctl_num, param_len) +} + +func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len) +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash b/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash new file mode 100644 index 00000000000..9952c888385 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go b/Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go new file mode 100644 index 00000000000..4f4d5ca26ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go @@ -0,0 +1,60 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + err = grantpt(p) + if err != nil { + return nil, nil, err + } + + err = unlockpt(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) + + err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) + if err != nil { 
+ return "", err + } + + for i, c := range n { + if c == 0 { + return string(n[:i]), nil + } + } + return "", errors.New("TIOCPTYGNAME string not NUL-terminated") +} + +func grantpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) +} + +func unlockpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go b/Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go new file mode 100644 index 00000000000..b341babd054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go @@ -0,0 +1,73 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func posix_openpt(oflag int) (fd int, err error) { + r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func open() (pty, tty *os.File, err error) { + fd, err := posix_openpt(syscall.O_RDWR | syscall.O_CLOEXEC) + if err != nil { + return nil, nil, err + } + + p := os.NewFile(uintptr(fd), "/dev/pts") + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctl_FIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + +func ptsname(f *os.File) (string, error) { + master, err := isptmaster(f.Fd()) + if err != nil { + return "", err + } + if !master { + return "", syscall.EINVAL + } + + const n = _C_SPECNAMELEN + 1 + var ( + buf = make([]byte, n) + arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} + ) + err = ioctl(f.Fd(), ioctl_FIODGNAME, uintptr(unsafe.Pointer(&arg))) + if err != nil { + return "", err + } + + for i, c := range buf { + if c == 0 { + return string(buf[:i]), nil + } + } + return "", errors.New("FIODGNAME string not NUL-terminated") +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/pty_linux.go b/Godeps/_workspace/src/github.com/kr/pty/pty_linux.go new file mode 100644 index 00000000000..cb901a21e00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/pty_linux.go @@ -0,0 +1,46 @@ +package pty + +import ( + "os" + "strconv" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + err = unlockpt(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + var n _C_uint + err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))) + if err != nil { + return "", err + } + return "/dev/pts/" + strconv.Itoa(int(n)), nil +} + +func unlockpt(f *os.File) error { + var u _C_int + // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock + return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go b/Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go new file mode 100644 index 00000000000..898c7303c4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!darwin,!freebsd + +package 
pty + +import ( + "os" +) + +func open() (pty, tty *os.File, err error) { + return nil, nil, ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/run.go b/Godeps/_workspace/src/github.com/kr/pty/run.go new file mode 100644 index 00000000000..f0678d2a27e --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/run.go @@ -0,0 +1,28 @@ +package pty + +import ( + "os" + "os/exec" + "syscall" +) + +// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, +// and c.Stderr, calls c.Start, and returns the File of the tty's +// corresponding pty. +func Start(c *exec.Cmd) (pty *os.File, err error) { + pty, tty, err := Open() + if err != nil { + return nil, err + } + defer tty.Close() + c.Stdout = tty + c.Stdin = tty + c.Stderr = tty + c.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true} + err = c.Start() + if err != nil { + pty.Close() + return nil, err + } + return pty, err +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/types.go b/Godeps/_workspace/src/github.com/kr/pty/types.go new file mode 100644 index 00000000000..5aecb6bcdcb --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/types.go @@ -0,0 +1,10 @@ +// +build ignore + +package pty + +import "C" + +type ( + _C_int C.int + _C_uint C.uint +) diff --git a/Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go b/Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go new file mode 100644 index 00000000000..ce3eb951810 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go @@ -0,0 +1,15 @@ +// +build ignore + +package pty + +/* +#include +#include +*/ +import "C" + +const ( + _C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */ +) + +type fiodgnameArg C.struct_fiodgname_arg diff --git a/Godeps/_workspace/src/github.com/kr/pty/util.go b/Godeps/_workspace/src/github.com/kr/pty/util.go new file mode 100644 index 00000000000..67c52d06cdc --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/util.go @@ -0,0 +1,35 @@ +package pty + +import ( + "os" + "syscall" + "unsafe" +) + +// Getsize returns the number of rows (lines) and cols (positions +// in each line) in terminal t. 
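An illustrative aside: Getsize is typically pointed at a terminal the process already holds, for example the tty returned by Open or Start, or os.Stdin when it is attached to a terminal. A minimal sketch under that assumption:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kr/pty"
)

func main() {
	rows, cols, err := pty.Getsize(os.Stdin)
	if err != nil {
		log.Fatal(err) // fails when stdin is not a terminal
	}
	fmt.Printf("terminal is %d rows by %d columns\n", rows, cols)
}
```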
+func Getsize(t *os.File) (rows, cols int, err error) { + var ws winsize + err = windowrect(&ws, t.Fd()) + return int(ws.ws_row), int(ws.ws_col), err +} + +type winsize struct { + ws_row uint16 + ws_col uint16 + ws_xpixel uint16 + ws_ypixel uint16 +} + +func windowrect(ws *winsize, fd uintptr) error { + _, _, errno := syscall.Syscall( + syscall.SYS_IOCTL, + fd, + syscall.TIOCGWINSZ, + uintptr(unsafe.Pointer(ws)), + ) + if errno != 0 { + return syscall.Errno(errno) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go new file mode 100644 index 00000000000..ff0b8fd838f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go new file mode 100644 index 00000000000..ff0b8fd838f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go new file mode 100644 index 00000000000..ff0b8fd838f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go new file mode 100644 index 00000000000..d9975374e3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go new file mode 100644 index 00000000000..5fa102fcdf6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Pad_cgo_0 [4]byte + Buf *byte +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go new file mode 100644 index 00000000000..d9975374e3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64.go new file mode 100644 index 00000000000..4e1af84312b --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64.go @@ -0,0 +1,11 @@ +// +build ppc64 + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git 
a/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64le.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64le.go new file mode 100644 index 00000000000..e6780f4e237 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_ppc64le.go @@ -0,0 +1,11 @@ +// +build ppc64le + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/Godeps/_workspace/src/github.com/kr/pty/ztypes_s390x.go b/Godeps/_workspace/src/github.com/kr/pty/ztypes_s390x.go new file mode 100644 index 00000000000..a7452b61cb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pty/ztypes_s390x.go @@ -0,0 +1,11 @@ +// +build s390x + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) From 5bd0e9ab05c99c76cc07b84c4134c225994cdcc2 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Thu, 8 Jan 2015 15:41:38 -0500 Subject: [PATCH 3/3] Add streaming command execution & port forwarding Add streaming command execution & port forwarding via HTTP connection upgrades (currently using SPDY). --- cmd/gendocs/gen_kubectl_docs.go | 3 +- cmd/genman/gen_kubectl_man.go | 3 +- cmd/kubectl/kubectl.go | 2 +- .../command_execution_port_forwarding.md | 144 +++++ docs/kubectl-exec.md | 60 ++ docs/kubectl-port-forward.md | 64 +++ docs/kubectl.md | 2 + docs/man/man1/kubectl-exec.1 | 52 ++ docs/man/man1/kubectl-port-forward.1 | 52 ++ docs/man/man1/kubectl.1 | 2 +- pkg/api/types.go | 29 + pkg/apiserver/handlers.go | 1 + pkg/apiserver/proxy.go | 69 ++- pkg/apiserver/proxy_test.go | 43 ++ pkg/client/portforward/doc.go | 19 + pkg/client/portforward/portforward.go | 300 ++++++++++ pkg/client/portforward/portforward_test.go | 321 +++++++++++ pkg/client/remotecommand/doc.go | 20 + pkg/client/remotecommand/remotecommand.go | 186 ++++++ .../remotecommand/remotecommand_test.go | 288 ++++++++++ pkg/client/request.go | 47 +- pkg/client/request_test.go | 128 ++++- pkg/conversion/deep_equal.go | 2 +- pkg/httplog/log.go | 13 +- pkg/kubectl/cmd/cmd.go | 5 +- pkg/kubectl/cmd/exec.go | 133 +++++ pkg/kubectl/cmd/portforward.go | 104 ++++ pkg/kubelet/dockertools/docker.go | 123 ++++ pkg/kubelet/dockertools/pty_linux.go | 30 + pkg/kubelet/dockertools/pty_unsupported.go | 28 + pkg/kubelet/kubelet.go | 90 ++- pkg/kubelet/kubelet_test.go | 280 ++++++++- pkg/kubelet/server.go | 269 ++++++++- pkg/kubelet/server/server.go | 171 +++--- pkg/kubelet/server_test.go | 539 +++++++++++++++++- pkg/util/httpstream/doc.go | 19 + pkg/util/httpstream/httpstream.go | 80 +++ pkg/util/httpstream/spdy/connection.go | 139 +++++ pkg/util/httpstream/spdy/roundtripper.go | 130 +++++ pkg/util/httpstream/spdy/roundtripper_test.go | 226 ++++++++ pkg/util/httpstream/spdy/upgrade.go | 78 +++ pkg/util/httpstream/spdy/upgrade_test.go | 93 +++ pkg/util/net.go | 22 + test/e2e/pods.go | 174 ++++++ test/e2e/util.go | 13 +- 45 files changed, 4439 insertions(+), 157 deletions(-) create mode 100644 docs/design/command_execution_port_forwarding.md create mode 100644 docs/kubectl-exec.md create mode 100644 docs/kubectl-port-forward.md create mode 100644 docs/man/man1/kubectl-exec.1 create mode 100644 docs/man/man1/kubectl-port-forward.1 create mode 100644 pkg/client/portforward/doc.go create mode 100644 pkg/client/portforward/portforward.go create mode 100644 pkg/client/portforward/portforward_test.go create mode 100644 pkg/client/remotecommand/doc.go create mode 100644 pkg/client/remotecommand/remotecommand.go create mode 100644 
pkg/client/remotecommand/remotecommand_test.go create mode 100644 pkg/kubectl/cmd/exec.go create mode 100644 pkg/kubectl/cmd/portforward.go create mode 100644 pkg/kubelet/dockertools/pty_linux.go create mode 100644 pkg/kubelet/dockertools/pty_unsupported.go create mode 100644 pkg/util/httpstream/doc.go create mode 100644 pkg/util/httpstream/httpstream.go create mode 100644 pkg/util/httpstream/spdy/connection.go create mode 100644 pkg/util/httpstream/spdy/roundtripper.go create mode 100644 pkg/util/httpstream/spdy/roundtripper_test.go create mode 100644 pkg/util/httpstream/spdy/upgrade.go create mode 100644 pkg/util/httpstream/spdy/upgrade_test.go diff --git a/cmd/gendocs/gen_kubectl_docs.go b/cmd/gendocs/gen_kubectl_docs.go index 61a8cfca3be..4d71951f6b2 100644 --- a/cmd/gendocs/gen_kubectl_docs.go +++ b/cmd/gendocs/gen_kubectl_docs.go @@ -164,7 +164,8 @@ func main() { // Set environment variables used by kubectl so the output is consistent, // regardless of where we run. os.Setenv("HOME", "/home/username") - kubectl := cmd.NewFactory(nil).NewKubectlCommand(ioutil.Discard) + //TODO os.Stdin should really be something like ioutil.Discard, but a Reader + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) genMarkdown(kubectl, "", docsDir) for _, c := range kubectl.Commands() { genMarkdown(c, "kubectl", docsDir) diff --git a/cmd/genman/gen_kubectl_man.go b/cmd/genman/gen_kubectl_man.go index 53391429edb..68878aba117 100644 --- a/cmd/genman/gen_kubectl_man.go +++ b/cmd/genman/gen_kubectl_man.go @@ -62,7 +62,8 @@ func main() { // Set environment variables used by kubectl so the output is consistent, // regardless of where we run. os.Setenv("HOME", "/home/username") - kubectl := cmd.NewFactory(nil).NewKubectlCommand(ioutil.Discard) + //TODO os.Stdin should really be something like ioutil.Discard, but a Reader + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) genMarkdown(kubectl, "", docsDir) for _, c := range kubectl.Commands() { genMarkdown(c, "kubectl", docsDir) diff --git a/cmd/kubectl/kubectl.go b/cmd/kubectl/kubectl.go index 5e1e13c6e57..2b0a920e322 100644 --- a/cmd/kubectl/kubectl.go +++ b/cmd/kubectl/kubectl.go @@ -25,7 +25,7 @@ import ( func main() { runtime.GOMAXPROCS(runtime.NumCPU()) - cmd := cmd.NewFactory(nil).NewKubectlCommand(os.Stdout) + cmd := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, os.Stdout, os.Stderr) if err := cmd.Execute(); err != nil { os.Exit(1) } diff --git a/docs/design/command_execution_port_forwarding.md b/docs/design/command_execution_port_forwarding.md new file mode 100644 index 00000000000..3b9aeec7569 --- /dev/null +++ b/docs/design/command_execution_port_forwarding.md @@ -0,0 +1,144 @@ +# Container Command Execution & Port Forwarding in Kubernetes + +## Abstract + +This describes an approach for providing support for: + +- executing commands in containers, with stdin/stdout/stderr streams attached +- port forwarding to containers + +## Background + +There are several related issues/PRs: + +- [Support attach](https://github.com/GoogleCloudPlatform/kubernetes/issues/1521) +- [Real container ssh](https://github.com/GoogleCloudPlatform/kubernetes/issues/1513) +- [Provide easy debug network access to services](https://github.com/GoogleCloudPlatform/kubernetes/issues/1863) +- [OpenShift container command execution proposal](https://github.com/openshift/origin/pull/576) + +## Motivation + +Users and administrators are accustomed to being able to access their systems +via SSH to run 
remote commands, get shell access, and do port forwarding. + +Supporting SSH to containers in Kubernetes is a difficult task. You must +specify a "user" and a hostname to make an SSH connection, and `sshd` requires +real users (resolvable by NSS and PAM). Because a container belongs to a pod, +and the pod belongs to a namespace, you need to specify namespace/pod/container +to uniquely identify the target container. Unfortunately, a +namespace/pod/container is not a real user as far as SSH is concerned. Also, +most Linux systems limit user names to 32 characters, which is unlikely to be +large enough to contain namespace/pod/container. We could devise some scheme to +map each namespace/pod/container to a 32-character user name, adding entries to +`/etc/passwd` (or LDAP, etc.) and keeping those entries fully in sync all the +time. Alternatively, we could write custom NSS and PAM modules that allow the +host to resolve a namespace/pod/container to a user without needing to keep +files or LDAP in sync. + +As an alternative to SSH, we are using a multiplexed streaming protocol that +runs on top of HTTP. There are no requirements about users being real users, +nor is there any limitation on user name length, as the protocol is under our +control. The only downside is that standard tooling that expects to use SSH +won't be able to work with this mechanism, unless adapters can be written. + +## Constraints and Assumptions + +- SSH support is not currently in scope +- CGroup confinement is ultimately desired, but implementing that support is not currently in scope +- SELinux confinement is ultimately desired, but implementing that support is not currently in scope + +## Use Cases + +- As a user of a Kubernetes cluster, I want to run arbitrary commands in a container, attaching my local stdin/stdout/stderr to the container +- As a user of a Kubernetes cluster, I want to be able to connect to local ports on my computer and have them forwarded to ports in the container + +## Process Flow + +### Remote Command Execution Flow +1. The client connects to the Kubernetes Master to initiate a remote command execution +request +2. The Master proxies the request to the Kubelet where the container lives +3. The Kubelet executes nsenter + the requested command and streams stdin/stdout/stderr back and forth between the client and the container + +### Port Forwarding Flow +1. The client connects to the Kubernetes Master to initiate a remote command execution +request +2. The Master proxies the request to the Kubelet where the container lives +3. The client listens on each specified local port, awaiting local connections +4. The client connects to one of the local listening ports +4. The client notifies the Kubelet of the new connection +5. The Kubelet executes nsenter + socat and streams data back and forth between the client and the port in the container + + +## Design Considerations + +### Streaming Protocol + +The current multiplexed streaming protocol used is SPDY. This is not the +long-term desire, however. As soon as there is viable support for HTTP/2 in Go, +we will switch to that. + +### Master as First Level Proxy + +Clients should not be allowed to communicate directly with the Kubelet for +security reasons. Therefore, the Master is currently the only suggested entry +point to be used for remote command execution and port forwarding. 
This is not +necessarily desirable, as it means that all remote command execution and port +forwarding traffic must travel through the Master, potentially impacting other +API requests. + +In the future, it might make more sense to retrieve an authorization token from +the Master, and then use that token to initiate a remote command execution or +port forwarding request with a load balanced proxy service dedicated to this +functionality. This would keep the streaming traffic out of the Master. + +### Kubelet as Backend Proxy + +The kubelet is currently responsible for handling remote command execution and +port forwarding requests. Just like with the Master described above, this means +that all remote command execution and port forwarding streaming traffic must +travel through the Kubelet, which could result in a degraded ability to service +other requests. + +In the future, it might make more sense to use a separate service on the node. + +Alternatively, we could possibly inject a process into the container that only +listens for a single request, expose that process's listening port on the node, +and then issue a redirect to the client such that it would connect to the first +level proxy, which would then proxy directly to the injected process's exposed +port. This would minimize the amount of proxying that takes place. + +### Scalability + +There are at least 2 different ways to execute a command in a container: +`docker exec` and `nsenter`. While `docker exec` might seem like an easier and +more obvious choice, it has some drawbacks. + +#### `docker exec` + +We could expose `docker exec` (i.e. have Docker listen on an exposed TCP port +on the node), but this would require proxying from the edge and securing the +Docker API. `docker exec` calls go through the Docker daemon, meaning that all +stdin/stdout/stderr traffic is proxied through the Daemon, adding an extra hop. +Additionally, you can't isolate 1 malicious `docker exec` call from normal +usage, meaning an attacker could initiate a denial of service or other attack +and take down the Docker daemon, or the node itself. + +We expect remote command execution and port forwarding requests to be long +running and/or high bandwidth operations, and routing all the streaming data +through the Docker daemon feels like a bottleneck we can avoid. + +#### `nsenter` + +The implementation currently uses `nsenter` to run commands in containers, +joining the appropriate container namespaces. `nsenter` runs directly on the +node and is not proxied through any single daemon process. + +### Security + +Authentication and authorization hasn't specifically been tested yet with this +functionality. We need to make sure that users are not allowed to execute +remote commands or do port forwarding to containers they aren't allowed to +access. + +Additional work is required to ensure that multiple command execution or port forwarding connections from different clients are not able to see each other's data. This can most likely be achieved via SELinux labeling and unique process contexts. \ No newline at end of file diff --git a/docs/kubectl-exec.md b/docs/kubectl-exec.md new file mode 100644 index 00000000000..c5366b7eb08 --- /dev/null +++ b/docs/kubectl-exec.md @@ -0,0 +1,60 @@ +## kubectl exec + +Execute a command in a container. + +### Synopsis + +Execute a command in a container. 
+Examples: + $ kubectl exec -p 123456-7890 -c ruby-container date + + + $ kubectl exec -p 123456-7890 -c ruby-container -i -t -- bash -il + -c -- [] + +### Options + +``` + -c, --container="": Container name + -p, --pod="": Pod name + -i, --stdin=false: Pass stdin to the container + -t, --tty=false: Stdin is a TTY +``` + +### Options inherrited from parent commands + +``` + --alsologtostderr=false: log to standard error as well as files + --api-version="": The API version to use when talking to the server + -a, --auth-path="": Path to the auth info file. If missing, prompt the user. Only used if using https. + --certificate-authority="": Path to a cert. file for the certificate authority. + --client-certificate="": Path to a client key file for TLS. + --client-key="": Path to a client key file for TLS. + --cluster="": The name of the kubeconfig cluster to use + --context="": The name of the kubeconfig context to use + -h, --help=false: help for kubectl + --insecure-skip-tls-verify=false: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. + --kubeconfig="": Path to the kubeconfig file to use for CLI requests. + --log_backtrace_at=:0: when logging hits line file:N, emit a stack trace + --log_dir=: If non-empty, write log files in this directory + --log_flush_frequency=5s: Maximum number of seconds between log flushes + --logtostderr=true: log to standard error instead of files + --match-server-version=false: Require server version to match client version + --namespace="": If present, the namespace scope for this CLI request. + --password="": Password for basic authentication to the API server. + -s, --server="": The address and port of the Kubernetes API server + --stderrthreshold=2: logs at or above this threshold go to stderr + --token="": Bearer token for authentication to the API server. + --user="": The name of the kubeconfig user to use + --username="": Username for basic authentication to the API server. + --v=0: log level for V logs + --validate=false: If true, use a schema to validate the input before sending it + --vmodule=: comma-separated list of pattern=N settings for file-filtered logging +``` + +### SEE ALSO +* [kubectl](kubectl.md) + diff --git a/docs/kubectl-port-forward.md b/docs/kubectl-port-forward.md new file mode 100644 index 00000000000..e01f4679805 --- /dev/null +++ b/docs/kubectl-port-forward.md @@ -0,0 +1,64 @@ +## kubectl port-forward + +Forward 1 or more local ports to a pod. + +### Synopsis + +Forward 1 or more local ports to a pod. +Examples: + $ kubectl port-forward -p mypod 5000 6000 + + + $ kubectl port-forward -p mypod 8888:5000 + + + $ kubectl port-forward -p mypod :5000 + + + $ kubectl port-forward -p mypod 0:5000 + + + +kubectl port-forward -p [:] [...] + +### Options + +``` + -p, --pod="": Pod name +``` + +### Options inherrited from parent commands + +``` + --alsologtostderr=false: log to standard error as well as files + --api-version="": The API version to use when talking to the server + -a, --auth-path="": Path to the auth info file. If missing, prompt the user. Only used if using https. + --certificate-authority="": Path to a cert. file for the certificate authority. + --client-certificate="": Path to a client key file for TLS. + --client-key="": Path to a client key file for TLS. 
+ --cluster="": The name of the kubeconfig cluster to use + --context="": The name of the kubeconfig context to use + -h, --help=false: help for kubectl + --insecure-skip-tls-verify=false: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. + --kubeconfig="": Path to the kubeconfig file to use for CLI requests. + --log_backtrace_at=:0: when logging hits line file:N, emit a stack trace + --log_dir=: If non-empty, write log files in this directory + --log_flush_frequency=5s: Maximum number of seconds between log flushes + --logtostderr=true: log to standard error instead of files + --match-server-version=false: Require server version to match client version + --namespace="": If present, the namespace scope for this CLI request. + --password="": Password for basic authentication to the API server. + -s, --server="": The address and port of the Kubernetes API server + --stderrthreshold=2: logs at or above this threshold go to stderr + --token="": Bearer token for authentication to the API server. + --user="": The name of the kubeconfig user to use + --username="": Username for basic authentication to the API server. + --v=0: log level for V logs + --validate=false: If true, use a schema to validate the input before sending it + --vmodule=: comma-separated list of pattern=N settings for file-filtered logging +``` + +### SEE ALSO +* [kubectl](kubectl.md) + diff --git a/docs/kubectl.md b/docs/kubectl.md index d8c873d9283..087951b2965 100644 --- a/docs/kubectl.md +++ b/docs/kubectl.md @@ -54,6 +54,8 @@ kubectl * [kubectl-log](kubectl-log.md) * [kubectl-rollingupdate](kubectl-rollingupdate.md) * [kubectl-resize](kubectl-resize.md) +* [kubectl-exec](kubectl-exec.md) +* [kubectl-port-forward](kubectl-port-forward.md) * [kubectl-run-container](kubectl-run-container.md) * [kubectl-stop](kubectl-stop.md) * [kubectl-expose](kubectl-expose.md) diff --git a/docs/man/man1/kubectl-exec.1 b/docs/man/man1/kubectl-exec.1 new file mode 100644 index 00000000000..6266ef68ddc --- /dev/null +++ b/docs/man/man1/kubectl-exec.1 @@ -0,0 +1,52 @@ +.TH "KUBERNETES" "1" " kubernetes User Manuals" "Eric Paris" "Jan 2015" "" + + +.SH NAME +.PP +kubectl exec \- Execute a command in a container. + + +.SH SYNOPSIS +.PP +\fBkubectl exec\fP [OPTIONS] + + +.SH DESCRIPTION +.PP +Execute a command in a container. +Examples: + $ kubectl exec \-p 123456\-7890 \-c ruby\-container date + + +.PP +$ kubectl exec \-p 123456\-7890 \-c ruby\-container \-i \-t \-\- bash \-il + + +.PP +.RS + +.nf +$ kubectl port\-forward \-p mypod 8888:5000 + + +$ kubectl port\-forward \-p mypod :5000 + + +$ kubectl port\-forward \-p mypod 0:5000 + + +.fi +.RE + + +.SH OPTIONS +.PP +\fB\-p\fP, \fB\-\-pod\fP="" + Pod name + + +.SH SEE ALSO +.PP +\fBkubectl(1)\fP, + + +.SH HISTORY +.PP +January 2015, Originally compiled by Eric Paris (eparis at redhat dot com) based on the kubernetes source material, but hopefully they have been automatically generated since! 
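
For orientation, here is a minimal sketch of how the port-forward client added later in this series (pkg/client/portforward) might be driven programmatically. It is illustrative only: the request/config setup is assumed rather than shown, and `forwardExample` is a hypothetical helper (see pkg/kubectl/cmd/portforward.go in this patch for the real wiring).

```go
package example

import (
	"os"
	"os/signal"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/portforward"
)

// forwardExample is an illustrative sketch, not part of this patch. It assumes
// req already targets the pod's portForward endpoint and config carries the
// cluster credentials.
func forwardExample(req *client.Request, config *client.Config) error {
	stopChan := make(chan struct{}, 1)

	// Close stopChan on Ctrl+C so ForwardPorts returns, mirroring what
	// kubectl port-forward does.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	go func() {
		<-signals
		close(stopChan)
	}()

	// "8888:5000" forwards localhost:8888 to pod port 5000; ":5000" picks a
	// random local port for pod port 5000 (see parsePorts).
	pf, err := portforward.New(req, config, []string{"8888:5000", ":5000"}, stopChan)
	if err != nil {
		return err
	}

	// ForwardPorts upgrades the connection, starts the local listeners, and
	// blocks until stopChan is closed or the streaming connection is lost.
	return pf.ForwardPorts()
}
```

Note that ForwardPorts only returns once stopChan is closed or the upgraded connection drops, so callers typically tie it to signal handling as above or run it in its own goroutine.
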
diff --git a/docs/man/man1/kubectl.1 b/docs/man/man1/kubectl.1 index 505afdf5cd5..3d9065e55ec 100644 --- a/docs/man/man1/kubectl.1 +++ b/docs/man/man1/kubectl.1 @@ -128,7 +128,7 @@ Find more information at .SH SEE ALSO .PP -\fBkubectl\-version(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-log(1)\fP, \fBkubectl\-rollingupdate(1)\fP, \fBkubectl\-resize(1)\fP, \fBkubectl\-run\-container(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, +\fBkubectl\-version(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-log(1)\fP, \fBkubectl\-rollingupdate(1)\fP, \fBkubectl\-resize(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-run\-container(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, .SH HISTORY diff --git a/pkg/api/types.go b/pkg/api/types.go index 162346c38ff..e103110f12d 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1349,3 +1349,32 @@ type SecretList struct { Items []Secret `json:"items"` } + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParamm = "command" + + StreamType = "streamType" + StreamTypeStdin = "stdin" + StreamTypeStdout = "stdout" + StreamTypeStderr = "stderr" + StreamTypeData = "data" + StreamTypeError = "error" + + PortHeader = "port" +) diff --git a/pkg/apiserver/handlers.go b/pkg/apiserver/handlers.go index 53844124147..04d05b2d714 100644 --- a/pkg/apiserver/handlers.go +++ b/pkg/apiserver/handlers.go @@ -99,6 +99,7 @@ func RecoverPanics(handler http.Handler) http.Handler { http.StatusConflict, http.StatusNotFound, errors.StatusUnprocessableEntity, + http.StatusSwitchingProtocols, ), ).Log() diff --git a/pkg/apiserver/proxy.go b/pkg/apiserver/proxy.go index dda1ff8526d..a2e0dc88a8e 100644 --- a/pkg/apiserver/proxy.go +++ b/pkg/apiserver/proxy.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "io/ioutil" + "net" "net/http" "net/http/httputil" "net/url" @@ -34,6 +35,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/httplog" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/golang/glog" "golang.org/x/net/html" @@ -176,14 +178,67 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { httpCode = http.StatusOK newReq.Header = req.Header - proxy := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "http", Host: destURL.Host}) - proxy.Transport = &proxyTransport{ - 
proxyScheme: req.URL.Scheme, - proxyHost: req.URL.Host, - proxyPathPrepend: path.Join(r.prefix, "ns", namespace, resource, id), + // TODO convert this entire proxy to an UpgradeAwareProxy similar to + // https://github.com/openshift/origin/blob/master/pkg/util/httpproxy/upgradeawareproxy.go. + // That proxy needs to be modified to support multiple backends, not just 1. + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + if strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) && len(req.Header.Get(httpstream.HeaderUpgrade)) > 0 { + //TODO support TLS? Doesn't look like proxyTransport does anything special ... + dialAddr := util.CanonicalAddr(destURL) + backendConn, err := net.Dial("tcp", dialAddr) + if err != nil { + status := errToAPIStatus(err) + writeJSON(status.Code, r.codec, status, w) + return + } + defer backendConn.Close() + + // TODO should we use _ (a bufio.ReadWriter) instead of requestHijackedConn + // when copying between the client and the backend? Docker doesn't when they + // hijack, just for reference... + requestHijackedConn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + status := errToAPIStatus(err) + writeJSON(status.Code, r.codec, status, w) + return + } + defer requestHijackedConn.Close() + + if err = newReq.Write(backendConn); err != nil { + status := errToAPIStatus(err) + writeJSON(status.Code, r.codec, status, w) + return + } + + done := make(chan struct{}, 2) + + go func() { + _, err := io.Copy(backendConn, requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + glog.Errorf("Error proxying data from client to backend: %v", err) + } + done <- struct{}{} + }() + + go func() { + _, err := io.Copy(requestHijackedConn, backendConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + glog.Errorf("Error proxying data from backend to client: %v", err) + } + done <- struct{}{} + }() + + <-done + } else { + proxy := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "http", Host: destURL.Host}) + proxy.Transport = &proxyTransport{ + proxyScheme: req.URL.Scheme, + proxyHost: req.URL.Host, + proxyPathPrepend: path.Join(r.prefix, "ns", namespace, resource, id), + } + proxy.FlushInterval = 200 * time.Millisecond + proxy.ServeHTTP(w, newReq) } - proxy.FlushInterval = 200 * time.Millisecond - proxy.ServeHTTP(w, newReq) } type proxyTransport struct { diff --git a/pkg/apiserver/proxy_test.go b/pkg/apiserver/proxy_test.go index cf412aa9500..90b56ec9b33 100644 --- a/pkg/apiserver/proxy_test.go +++ b/pkg/apiserver/proxy_test.go @@ -29,6 +29,7 @@ import ( "testing" "golang.org/x/net/html" + "golang.org/x/net/websocket" ) func parseURLOrDie(inURL string) *url.URL { @@ -327,3 +328,45 @@ func TestProxy(t *testing.T) { } } } + +func TestProxyUpgrade(t *testing.T) { + backendServer := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + body := make([]byte, 5) + ws.Read(body) + ws.Write([]byte("hello " + string(body))) + })) + defer backendServer.Close() + + simpleStorage := &SimpleRESTStorage{ + errors: map[string]error{}, + resourceLocation: backendServer.URL, + expectedResourceNamespace: "myns", + } + + namespaceHandler := Handle(map[string]RESTStorage{ + "foo": simpleStorage, + }, codec, "/prefix", "version", selfLinker, admissionControl, requestContextMapper, namespaceMapper) + + server := httptest.NewServer(namespaceHandler) + defer server.Close() + + ws, err := 
websocket.Dial("ws://"+server.Listener.Addr().String()+"/prefix/version/proxy/namespaces/myns/foo/123", "", "http://127.0.0.1/") + if err != nil { + t.Fatalf("websocket dial err: %s", err) + } + defer ws.Close() + + if _, err := ws.Write([]byte("world")); err != nil { + t.Fatalf("write err: %s", err) + } + + response := make([]byte, 20) + n, err := ws.Read(response) + if err != nil { + t.Fatalf("read err: %s", err) + } + if e, a := "hello world", string(response[0:n]); e != a { + t.Fatalf("expected '%#v', got '%#v'", e, a) + } +} diff --git a/pkg/client/portforward/doc.go b/pkg/client/portforward/doc.go new file mode 100644 index 00000000000..f4cd9153d8e --- /dev/null +++ b/pkg/client/portforward/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package portforward adds support for SSH-like port forwarding from the client's +// local host to remote containers. +package portforward diff --git a/pkg/client/portforward/portforward.go b/pkg/client/portforward/portforward.go new file mode 100644 index 00000000000..6cf92c54173 --- /dev/null +++ b/pkg/client/portforward/portforward.go @@ -0,0 +1,300 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "strconv" + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" + "github.com/golang/glog" +) + +type upgrader interface { + upgrade(*client.Request, *client.Config) (httpstream.Connection, error) +} + +type defaultUpgrader struct{} + +func (u *defaultUpgrader) upgrade(req *client.Request, config *client.Config) (httpstream.Connection, error) { + return req.Upgrade(config, spdy.NewRoundTripper) +} + +// PortForwarder knows how to listen for local connections and forward them to +// a remote pod via an upgraded HTTP request. +type PortForwarder struct { + req *client.Request + config *client.Config + ports []ForwardedPort + stopChan <-chan struct{} + + streamConn httpstream.Connection + listeners []io.Closer + upgrader upgrader + Ready chan struct{} +} + +// ForwardedPort contains a Local:Remote port pairing. 
+type ForwardedPort struct { + Local uint16 + Remote uint16 +} + +/* + valid port specifications: + + 5000 + - forwards from localhost:5000 to pod:5000 + + 8888:5000 + - forwards from localhost:8888 to pod:5000 + + 0:5000 + :5000 + - selects a random available local port, + forwards from localhost: to pod:5000 +*/ +func parsePorts(ports []string) ([]ForwardedPort, error) { + var forwards []ForwardedPort + for _, portString := range ports { + parts := strings.Split(portString, ":") + var localString, remoteString string + if len(parts) == 1 { + localString = parts[0] + remoteString = parts[0] + } else if len(parts) == 2 { + localString = parts[0] + if localString == "" { + // support :5000 + localString = "0" + } + remoteString = parts[1] + } else { + return nil, fmt.Errorf("Invalid port format '%s'", portString) + } + + localPort, err := strconv.ParseUint(localString, 10, 16) + if err != nil { + return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err) + } + + remotePort, err := strconv.ParseUint(remoteString, 10, 16) + if err != nil { + return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err) + } + if remotePort == 0 { + return nil, fmt.Errorf("Remote port must be > 0") + } + + forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)}) + } + + return forwards, nil +} + +// New creates a new PortForwarder. +func New(req *client.Request, config *client.Config, ports []string, stopChan <-chan struct{}) (*PortForwarder, error) { + if len(ports) == 0 { + return nil, errors.New("You must specify at least 1 port") + } + parsedPorts, err := parsePorts(ports) + if err != nil { + return nil, err + } + + return &PortForwarder{ + req: req, + config: config, + ports: parsedPorts, + stopChan: stopChan, + Ready: make(chan struct{}), + }, nil +} + +// ForwardPorts formats and executes a port forwarding request. The connection will remain +// open until stopChan is closed. +func (pf *PortForwarder) ForwardPorts() error { + defer pf.Close() + + if pf.upgrader == nil { + pf.upgrader = &defaultUpgrader{} + } + var err error + pf.streamConn, err = pf.upgrader.upgrade(pf.req, pf.config) + if err != nil { + return fmt.Errorf("Error upgrading connection: %s", err) + } + defer pf.streamConn.Close() + + return pf.forward() +} + +// forward dials the remote host specific in req, upgrades the request, starts +// listeners for each port specified in ports, and forwards local connections +// to the remote host via streams. +func (pf *PortForwarder) forward() error { + var err error + + listenSuccess := false + for _, port := range pf.ports { + err = pf.listenOnPort(&port) + if err != nil { + glog.Warningf("Unable to listen on port %d: %v", port, err) + } + listenSuccess = true + } + + if !listenSuccess { + return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports) + } + + close(pf.Ready) + + // wait for interrupt or conn closure + select { + case <-pf.stopChan: + case <-pf.streamConn.CloseChan(): + glog.Errorf("Lost connection to pod") + } + + return nil +} + +// listenOnPort creates a new listener on port and waits for new connections +// in the background. 
+func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error { + listener, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port.Local)) + if err != nil { + return err + } + parts := strings.Split(listener.Addr().String(), ":") + localPort, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return fmt.Errorf("Error parsing local part: %s", err) + } + port.Local = uint16(localPort) + glog.Infof("Forwarding from %d -> %d", localPort, port.Remote) + + pf.listeners = append(pf.listeners, listener) + + go pf.waitForConnection(listener, *port) + + return nil +} + +// waitForConnection waits for new connections to listener and handles them in +// the background. +func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) { + for { + conn, err := listener.Accept() + if err != nil { + // TODO consider using something like https://github.com/hydrogen18/stoppableListener? + if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { + glog.Errorf("Error accepting connection on port %d: %v", port.Local, err) + } + return + } + go pf.handleConnection(conn, port) + } +} + +// handleConnection copies data between the local connection and the stream to +// the remote server. +func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) { + defer conn.Close() + + glog.Infof("Handling connection for %d", port.Local) + + errorChan := make(chan error) + doneChan := make(chan struct{}, 2) + + // create error stream + headers := http.Header{} + headers.Set(api.StreamType, api.StreamTypeError) + headers.Set(api.PortHeader, fmt.Sprintf("%d", port.Remote)) + errorStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + glog.Errorf("Error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err) + return + } + defer errorStream.Reset() + go func() { + message, err := ioutil.ReadAll(errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err) + } + if len(message) > 0 { + errorChan <- fmt.Errorf("An error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message)) + } + }() + + // create data stream + headers.Set(api.StreamType, api.StreamTypeData) + dataStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + glog.Errorf("Error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err) + return + } + // Send a Reset when this function exits to completely tear down the stream here + // and in the remote server. + defer dataStream.Reset() + + go func() { + // Copy from the remote side to the local port. We won't get an EOF from + // the server as it has no way of knowing when to close the stream. We'll + // take care of closing both ends of the stream with the call to + // stream.Reset() when this function exits. + if _, err := io.Copy(conn, dataStream); err != nil && err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") { + glog.Errorf("Error copying from remote stream to local connection: %v", err) + } + doneChan <- struct{}{} + }() + + go func() { + // Copy from the local port to the remote side. Here we will be able to know + // when the Copy gets an EOF from conn, as that will happen as soon as conn is + // closed (i.e. client disconnected). 
+ if _, err := io.Copy(dataStream, conn); err != nil && err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") { + glog.Errorf("Error copying from local connection to remote stream: %v", err) + } + doneChan <- struct{}{} + }() + + select { + case err := <-errorChan: + glog.Error(err) + case <-doneChan: + } +} + +func (pf *PortForwarder) Close() { + // stop all listeners + for _, l := range pf.listeners { + if err := l.Close(); err != nil { + glog.Errorf("Error closing listener: %v", err) + } + } +} diff --git a/pkg/client/portforward/portforward_test.go b/pkg/client/portforward/portforward_test.go new file mode 100644 index 00000000000..16a29f933ba --- /dev/null +++ b/pkg/client/portforward/portforward_test.go @@ -0,0 +1,321 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "net/http" + "reflect" + "sync" + "testing" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" +) + +func TestParsePortsAndNew(t *testing.T) { + tests := []struct { + input []string + expected []ForwardedPort + expectParseError bool + expectNewError bool + }{ + {input: []string{}, expectNewError: true}, + {input: []string{"a"}, expectParseError: true, expectNewError: true}, + {input: []string{":a"}, expectParseError: true, expectNewError: true}, + {input: []string{"-1"}, expectParseError: true, expectNewError: true}, + {input: []string{"65536"}, expectParseError: true, expectNewError: true}, + {input: []string{"0"}, expectParseError: true, expectNewError: true}, + {input: []string{"0:0"}, expectParseError: true, expectNewError: true}, + {input: []string{"a:5000"}, expectParseError: true, expectNewError: true}, + {input: []string{"5000:a"}, expectParseError: true, expectNewError: true}, + { + input: []string{"5000", "5000:5000", "8888:5000", "5000:8888", ":5000", "0:5000"}, + expected: []ForwardedPort{ + {5000, 5000}, + {5000, 5000}, + {8888, 5000}, + {5000, 8888}, + {0, 5000}, + {0, 5000}, + }, + }, + } + + for i, test := range tests { + parsed, err := parsePorts(test.input) + haveError := err != nil + if e, a := test.expectParseError, haveError; e != a { + t.Fatalf("%d: parsePorts: error expected=%t, got %t: %s", i, e, a, err) + } + + expectedRequest := &client.Request{} + expectedConfig := &client.Config{} + expectedStopChan := make(chan struct{}) + pf, err := New(expectedRequest, expectedConfig, test.input, expectedStopChan) + haveError = err != nil + if e, a := test.expectNewError, haveError; e != a { + t.Fatalf("%d: New: error expected=%t, got %t: %s", i, e, a, err) + } + + if test.expectParseError || test.expectNewError { + continue + } + + for pi, expectedPort := range test.expected { + if e, a := expectedPort.Local, parsed[pi].Local; e != a { + t.Fatalf("%d: local expected: %d, got: %d", i, e, a) + } + if e, a := expectedPort.Remote, 
parsed[pi].Remote; e != a { + t.Fatalf("%d: remote expected: %d, got: %d", i, e, a) + } + } + + if e, a := expectedRequest, pf.req; e != a { + t.Fatalf("%d: req: expected %#v, got %#v", i, e, a) + } + if e, a := expectedConfig, pf.config; e != a { + t.Fatalf("%d: config: expected %#v, got %#v", i, e, a) + } + if e, a := test.expected, pf.ports; !reflect.DeepEqual(e, a) { + t.Fatalf("%d: ports: expected %#v, got %#v", i, e, a) + } + if e, a := expectedStopChan, pf.stopChan; e != a { + t.Fatalf("%d: stopChan: expected %#v, got %#v", i, e, a) + } + if pf.Ready == nil { + t.Fatalf("%d: Ready should be non-nil", i) + } + } +} + +type fakeUpgrader struct { + conn *fakeUpgradeConnection + err error +} + +func (u *fakeUpgrader) upgrade(req *client.Request, config *client.Config) (httpstream.Connection, error) { + return u.conn, u.err +} + +type fakeUpgradeConnection struct { + closeCalled bool + lock sync.Mutex + streams map[string]*fakeUpgradeStream + portData map[string]string +} + +func newFakeUpgradeConnection() *fakeUpgradeConnection { + return &fakeUpgradeConnection{ + streams: make(map[string]*fakeUpgradeStream), + portData: make(map[string]string), + } +} + +func (c *fakeUpgradeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) { + c.lock.Lock() + defer c.lock.Unlock() + + stream := &fakeUpgradeStream{} + c.streams[headers.Get(api.PortHeader)] = stream + stream.data = c.portData[headers.Get(api.PortHeader)] + + return stream, nil +} + +func (c *fakeUpgradeConnection) Close() error { + c.lock.Lock() + defer c.lock.Unlock() + + c.closeCalled = true + return nil +} + +func (c *fakeUpgradeConnection) CloseChan() <-chan bool { + return make(chan bool) +} + +func (c *fakeUpgradeConnection) SetIdleTimeout(timeout time.Duration) { +} + +type fakeUpgradeStream struct { + readCalled bool + writeCalled bool + dataWritten []byte + closeCalled bool + resetCalled bool + data string + lock sync.Mutex +} + +func (s *fakeUpgradeStream) Read(p []byte) (int, error) { + s.lock.Lock() + defer s.lock.Unlock() + s.readCalled = true + b := []byte(s.data) + n := copy(p, b) + return n, io.EOF +} + +func (s *fakeUpgradeStream) Write(p []byte) (int, error) { + s.lock.Lock() + defer s.lock.Unlock() + s.writeCalled = true + s.dataWritten = make([]byte, len(p)) + copy(s.dataWritten, p) + return len(p), io.EOF +} + +func (s *fakeUpgradeStream) Close() error { + s.lock.Lock() + defer s.lock.Unlock() + s.closeCalled = true + return nil +} + +func (s *fakeUpgradeStream) Reset() error { + s.lock.Lock() + defer s.lock.Unlock() + s.resetCalled = true + return nil +} + +func (s *fakeUpgradeStream) Headers() http.Header { + s.lock.Lock() + defer s.lock.Unlock() + return http.Header{} +} + +func TestForwardPorts(t *testing.T) { + testCases := []struct { + Upgrader *fakeUpgrader + Ports []string + Send map[uint16]string + Receive map[uint16]string + Err bool + }{ + { + Upgrader: &fakeUpgrader{err: errors.New("bail")}, + Err: true, + }, + { + Upgrader: &fakeUpgrader{conn: newFakeUpgradeConnection()}, + Ports: []string{"5000"}, + }, + { + Upgrader: &fakeUpgrader{conn: newFakeUpgradeConnection()}, + Ports: []string{"5000", "6000"}, + Send: map[uint16]string{ + 5000: "abcd", + 6000: "ghij", + }, + Receive: map[uint16]string{ + 5000: "1234", + 6000: "5678", + }, + }, + } + + for i, testCase := range testCases { + stopChan := make(chan struct{}, 1) + + pf, err := New(&client.Request{}, &client.Config{}, testCase.Ports, stopChan) + hasErr := err != nil + if hasErr != testCase.Err { + t.Fatalf("%d: New: expected %t, 
got %t: %v", i, testCase.Err, hasErr, err) + } + if pf == nil { + continue + } + pf.upgrader = testCase.Upgrader + if testCase.Upgrader.err != nil { + err := pf.ForwardPorts() + hasErr := err != nil + if hasErr != testCase.Err { + t.Fatalf("%d: ForwardPorts: expected %t, got %t: %v", i, testCase.Err, hasErr, err) + } + continue + } + + doneChan := make(chan error) + go func() { + doneChan <- pf.ForwardPorts() + }() + select { + case <-pf.Ready: + case <-time.After(500 * time.Millisecond): + t.Fatalf("%d: timed out waiting for listeners", i) + } + + conn := testCase.Upgrader.conn + + for port, data := range testCase.Send { + conn.lock.Lock() + conn.portData[fmt.Sprintf("%d", port)] = testCase.Receive[port] + conn.lock.Unlock() + + clientConn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port)) + if err != nil { + t.Fatalf("%d: error dialing %d: %s", i, port, err) + } + defer clientConn.Close() + + n, err := clientConn.Write([]byte(data)) + if err != nil && err != io.EOF { + t.Fatalf("%d: Error sending data '%s': %s", i, data, err) + } + if n == 0 { + t.Fatalf("%d: unexpected write of 0 bytes", i) + } + b := make([]byte, 4) + n, err = clientConn.Read(b) + if err != nil && err != io.EOF { + t.Fatalf("%d: Error reading data: %s", i, err) + } + if !bytes.Equal([]byte(testCase.Receive[port]), b) { + t.Fatalf("%d: expected to read '%s', got '%s'", i, testCase.Receive[port], b) + } + } + + // tell r.ForwardPorts to stop + close(stopChan) + + // wait for r.ForwardPorts to actually return + select { + case err := <-doneChan: + if err != nil { + t.Fatalf("%d: unexpected error: %s", err) + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("%d: timeout waiting for ForwardPorts to finish") + } + + if e, a := len(testCase.Send), len(conn.streams); e != a { + t.Fatalf("%d: expected %d streams to be created, got %d", e, a) + } + + if !conn.closeCalled { + t.Fatalf("%d: expected conn closure", i) + } + } +} diff --git a/pkg/client/remotecommand/doc.go b/pkg/client/remotecommand/doc.go new file mode 100644 index 00000000000..882542ff62a --- /dev/null +++ b/pkg/client/remotecommand/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remotecommand adds support for executing commands in containers, +// with support for separate stdin, stdout, and stderr streams, as well as +// TTY. +package remotecommand diff --git a/pkg/client/remotecommand/remotecommand.go b/pkg/client/remotecommand/remotecommand.go new file mode 100644 index 00000000000..fa32353ba66 --- /dev/null +++ b/pkg/client/remotecommand/remotecommand.go @@ -0,0 +1,186 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" + "github.com/golang/glog" +) + +type upgrader interface { + upgrade(*client.Request, *client.Config) (httpstream.Connection, error) +} + +type defaultUpgrader struct{} + +func (u *defaultUpgrader) upgrade(req *client.Request, config *client.Config) (httpstream.Connection, error) { + return req.Upgrade(config, spdy.NewRoundTripper) +} + +type RemoteCommandExecutor struct { + req *client.Request + config *client.Config + command []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + tty bool + + upgrader upgrader +} + +func New(req *client.Request, config *client.Config, command []string, stdin io.Reader, stdout, stderr io.Writer, tty bool) *RemoteCommandExecutor { + return &RemoteCommandExecutor{ + req: req, + config: config, + command: command, + stdin: stdin, + stdout: stdout, + stderr: stderr, + tty: tty, + } +} + +// Execute sends a remote command execution request, upgrading the +// connection and creating streams to represent stdin/stdout/stderr. Data is +// copied between these streams and the supplied stdin/stdout/stderr parameters. 
+func (e *RemoteCommandExecutor) Execute() error { + doStdin := (e.stdin != nil) + doStdout := (e.stdout != nil) + doStderr := (!e.tty && e.stderr != nil) + + if doStdin { + e.req.Param(api.ExecStdinParam, "1") + } + if doStdout { + e.req.Param(api.ExecStdoutParam, "1") + } + if doStderr { + e.req.Param(api.ExecStderrParam, "1") + } + if e.tty { + e.req.Param(api.ExecTTYParam, "1") + } + + for _, s := range e.command { + e.req.Param(api.ExecCommandParamm, s) + } + + if e.upgrader == nil { + e.upgrader = &defaultUpgrader{} + } + conn, err := e.upgrader.upgrade(e.req, e.config) + if err != nil { + return err + } + defer conn.Close() + + doneChan := make(chan struct{}, 2) + errorChan := make(chan error) + + cp := func(s string, dst io.Writer, src io.Reader) { + glog.V(4).Infof("Copying %s", s) + defer glog.V(4).Infof("Done copying %s", s) + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + glog.Errorf("Error copying %s: %v", s, err) + } + if s == api.StreamTypeStdout || s == api.StreamTypeStderr { + doneChan <- struct{}{} + } + } + + headers := http.Header{} + headers.Set(api.StreamType, api.StreamTypeError) + errorStream, err := conn.CreateStream(headers) + if err != nil { + return err + } + go func() { + message, err := ioutil.ReadAll(errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream: %s", err) + return + } + if len(message) > 0 { + errorChan <- fmt.Errorf("Error executing remote command: %s", message) + return + } + }() + defer errorStream.Reset() + + if doStdin { + headers.Set(api.StreamType, api.StreamTypeStdin) + remoteStdin, err := conn.CreateStream(headers) + if err != nil { + return err + } + defer remoteStdin.Reset() + // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) + // because stdin is not closed until the process exits. If we try to call + // stdin.Close(), it returns no error but doesn't unblock the copy. It will + // exit when the process exits, instead. + go cp(api.StreamTypeStdin, remoteStdin, e.stdin) + } + + waitCount := 0 + completedStreams := 0 + + if doStdout { + waitCount++ + headers.Set(api.StreamType, api.StreamTypeStdout) + remoteStdout, err := conn.CreateStream(headers) + if err != nil { + return err + } + defer remoteStdout.Reset() + go cp(api.StreamTypeStdout, e.stdout, remoteStdout) + } + + if doStderr && !e.tty { + waitCount++ + headers.Set(api.StreamType, api.StreamTypeStderr) + remoteStderr, err := conn.CreateStream(headers) + if err != nil { + return err + } + defer remoteStderr.Reset() + go cp(api.StreamTypeStderr, e.stderr, remoteStderr) + } + +Loop: + for { + select { + case <-doneChan: + completedStreams++ + if completedStreams == waitCount { + break Loop + } + case err := <-errorChan: + return err + } + } + + return nil +} diff --git a/pkg/client/remotecommand/remotecommand_test.go b/pkg/client/remotecommand/remotecommand_test.go new file mode 100644 index 00000000000..fdc7b17e524 --- /dev/null +++ b/pkg/client/remotecommand/remotecommand_test.go @@ -0,0 +1,288 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "bytes" + "errors" + "io" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" +) + +type fakeUpgrader struct { + conn *fakeUpgradeConnection + err error +} + +func (u *fakeUpgrader) upgrade(req *client.Request, config *client.Config) (httpstream.Connection, error) { + return u.conn, u.err +} + +type fakeUpgradeConnection struct { + closeCalled bool + lock sync.Mutex + + stdin *fakeUpgradeStream + stdout *fakeUpgradeStream + stdoutData string + stderr *fakeUpgradeStream + stderrData string + errorStream *fakeUpgradeStream + errorData string + unexpectedStreamCreated bool +} + +func newFakeUpgradeConnection() *fakeUpgradeConnection { + return &fakeUpgradeConnection{} +} + +func (c *fakeUpgradeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) { + c.lock.Lock() + defer c.lock.Unlock() + + stream := &fakeUpgradeStream{} + switch headers.Get(api.StreamType) { + case api.StreamTypeStdin: + c.stdin = stream + case api.StreamTypeStdout: + c.stdout = stream + stream.data = c.stdoutData + case api.StreamTypeStderr: + c.stderr = stream + stream.data = c.stderrData + case api.StreamTypeError: + c.errorStream = stream + stream.data = c.errorData + default: + c.unexpectedStreamCreated = true + } + + return stream, nil +} + +func (c *fakeUpgradeConnection) Close() error { + c.lock.Lock() + defer c.lock.Unlock() + + c.closeCalled = true + return nil +} + +func (c *fakeUpgradeConnection) CloseChan() <-chan bool { + return make(chan bool) +} + +func (c *fakeUpgradeConnection) SetIdleTimeout(timeout time.Duration) { +} + +type fakeUpgradeStream struct { + readCalled bool + writeCalled bool + dataWritten []byte + closeCalled bool + resetCalled bool + data string + lock sync.Mutex +} + +func (s *fakeUpgradeStream) Read(p []byte) (int, error) { + s.lock.Lock() + defer s.lock.Unlock() + s.readCalled = true + b := []byte(s.data) + n := copy(p, b) + return n, io.EOF +} + +func (s *fakeUpgradeStream) Write(p []byte) (int, error) { + s.lock.Lock() + defer s.lock.Unlock() + s.writeCalled = true + s.dataWritten = make([]byte, len(p)) + copy(s.dataWritten, p) + return len(p), io.EOF +} + +func (s *fakeUpgradeStream) Close() error { + s.lock.Lock() + defer s.lock.Unlock() + s.closeCalled = true + return nil +} + +func (s *fakeUpgradeStream) Reset() error { + s.lock.Lock() + defer s.lock.Unlock() + s.resetCalled = true + return nil +} + +func (s *fakeUpgradeStream) Headers() http.Header { + s.lock.Lock() + defer s.lock.Unlock() + return http.Header{} +} + +func TestRequestExecuteRemoteCommand(t *testing.T) { + testCases := []struct { + Upgrader *fakeUpgrader + Stdin string + Stdout string + Stderr string + Error string + Tty bool + ShouldError bool + }{ + { + Upgrader: &fakeUpgrader{err: errors.New("bail")}, + ShouldError: true, + }, + { + Upgrader: &fakeUpgrader{conn: newFakeUpgradeConnection()}, + Stdin: "a", + Stdout: "b", + Stderr: "c", + Error: "bail", + ShouldError: true, + }, + { + Upgrader: &fakeUpgrader{conn: newFakeUpgradeConnection()}, + Stdin: "a", + Stdout: "b", + Stderr: "c", + }, + { + Upgrader: &fakeUpgrader{conn: newFakeUpgradeConnection()}, + Stdin: "a", + Stdout: "b", + Stderr: "c", + Tty: true, + }, + } + + for i, testCase := range testCases { + if 
testCase.Error != "" { + testCase.Upgrader.conn.errorData = testCase.Error + } + if testCase.Stdout != "" { + testCase.Upgrader.conn.stdoutData = testCase.Stdout + } + if testCase.Stderr != "" { + testCase.Upgrader.conn.stderrData = testCase.Stderr + } + var localOut, localErr *bytes.Buffer + if testCase.Stdout != "" { + localOut = &bytes.Buffer{} + } + if testCase.Stderr != "" { + localErr = &bytes.Buffer{} + } + e := New(&client.Request{}, &client.Config{}, []string{"ls", "/"}, strings.NewReader(testCase.Stdin), localOut, localErr, testCase.Tty) + e.upgrader = testCase.Upgrader + err := e.Execute() + hasErr := err != nil + if hasErr != testCase.ShouldError { + t.Fatalf("%d: expected %t, got %t: %v", i, testCase.ShouldError, hasErr, err) + } + + conn := testCase.Upgrader.conn + if testCase.Error != "" { + if conn.errorStream == nil { + t.Fatalf("%d: expected error stream creation", i) + } + if !conn.errorStream.readCalled { + t.Fatalf("%d: expected error stream read", i) + } + if e, a := testCase.Error, err.Error(); !strings.Contains(a, e) { + t.Fatalf("%d: expected error stream read '%v', got '%v'", i, e, a) + } + if !conn.errorStream.resetCalled { + t.Fatalf("%d: expected error reset", i) + } + } + + if testCase.ShouldError { + continue + } + + if testCase.Stdin != "" { + if conn.stdin == nil { + t.Fatalf("%d: expected stdin stream creation", i) + } + if !conn.stdin.writeCalled { + t.Fatalf("%d: expected stdin stream write", i) + } + if e, a := testCase.Stdin, string(conn.stdin.dataWritten); e != a { + t.Fatalf("%d: expected stdin write %v, got %v", i, e, a) + } + if !conn.stdin.resetCalled { + t.Fatalf("%d: expected stdin reset", i) + } + } + + if testCase.Stdout != "" { + if conn.stdout == nil { + t.Fatalf("%d: expected stdout stream creation", i) + } + if !conn.stdout.readCalled { + t.Fatalf("%d: expected stdout stream read", i) + } + if e, a := testCase.Stdout, localOut; e != a.String() { + t.Fatalf("%d: expected stdout data '%s', got '%s'", i, e, a) + } + if !conn.stdout.resetCalled { + t.Fatalf("%d: expected stdout reset", i) + } + } + + if testCase.Stderr != "" { + if testCase.Tty { + if conn.stderr != nil { + t.Fatalf("%d: unexpected stderr stream creation", i) + } + if localErr.String() != "" { + t.Fatalf("%d: unexpected stderr data '%s'", i, localErr) + } + } else { + if conn.stderr == nil { + t.Fatalf("%d: expected stderr stream creation", i) + } + if !conn.stderr.readCalled { + t.Fatalf("%d: expected stderr stream read", i) + } + if e, a := testCase.Stderr, localErr; e != a.String() { + t.Fatalf("%d: expected stderr data '%s', got '%s'", i, e, a) + } + if !conn.stderr.resetCalled { + t.Fatalf("%d: expected stderr reset", i) + } + } + } + + if !conn.closeCalled { + t.Fatalf("%d: expected upgraded connection to get closed") + } + } +} diff --git a/pkg/client/request.go b/pkg/client/request.go index 051f94cb06e..b5773dd475a 100644 --- a/pkg/client/request.go +++ b/pkg/client/request.go @@ -18,6 +18,7 @@ package client import ( "bytes" + "crypto/tls" "fmt" "io" "io/ioutil" @@ -33,6 +34,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" watchjson "github.com/GoogleCloudPlatform/kubernetes/pkg/watch/json" "github.com/golang/glog" @@ -277,7 +279,7 @@ func (r *Request) setParam(paramName, value string) *Request { if r.params == nil { 
r.params = make(url.Values) } - r.params[paramName] = []string{value} + r.params[paramName] = append(r.params[paramName], value) return r } @@ -347,8 +349,10 @@ func (r *Request) finalURL() string { finalURL.Path = p query := url.Values{} - for key, value := range r.params { - query[key] = value + for key, values := range r.params { + for _, value := range values { + query.Add(key, value) + } } if r.namespaceSet && r.namespaceInQuery { @@ -434,6 +438,41 @@ func (r *Request) Stream() (io.ReadCloser, error) { return resp.Body, nil } +// Upgrade upgrades the request so that it supports multiplexed bidirectional +// streams. The current implementation uses SPDY, but this could be replaced +// with HTTP/2 once it's available, or something else. +func (r *Request) Upgrade(config *Config, newRoundTripperFunc func(*tls.Config) httpstream.UpgradeRoundTripper) (httpstream.Connection, error) { + if r.err != nil { + return nil, r.err + } + + tlsConfig, err := TLSConfigFor(config) + if err != nil { + return nil, err + } + + upgradeRoundTripper := newRoundTripperFunc(tlsConfig) + wrapper, err := HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, err + } + + r.client = &http.Client{Transport: wrapper} + + req, err := http.NewRequest(r.verb, r.finalURL(), nil) + if err != nil { + return nil, fmt.Errorf("Error creating request: %s", err) + } + + resp, err := r.client.Do(req) + if err != nil { + return nil, fmt.Errorf("Error sending request: %s", err) + } + defer resp.Body.Close() + + return upgradeRoundTripper.NewConnection(resp) +} + // Do formats and executes the request. Returns a Result object for easy response // processing. // @@ -513,6 +552,8 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) ([]b } switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: if !isStatusResponse { var err error = &UnexpectedStatusError{ diff --git a/pkg/client/request_test.go b/pkg/client/request_test.go index f23cbef97a7..942ccd7f532 100644 --- a/pkg/client/request_test.go +++ b/pkg/client/request_test.go @@ -18,6 +18,7 @@ package client import ( "bytes" + "crypto/tls" "encoding/base64" "errors" "io" @@ -40,6 +41,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" watchjson "github.com/GoogleCloudPlatform/kubernetes/pkg/watch/json" ) @@ -151,16 +153,22 @@ func TestRequestParam(t *testing.T) { if !api.Semantic.DeepDerivative(r.params, url.Values{"foo": []string{"a"}}) { t.Errorf("should have set a param: %#v", r) } + + r.Param("bar", "1") + r.Param("bar", "2") + if !api.Semantic.DeepDerivative(r.params, url.Values{"foo": []string{"a"}, "bar": []string{"1", "2"}}) { + t.Errorf("should have set a param: %#v", r) + } } func TestRequestURI(t *testing.T) { r := (&Request{}).Param("foo", "a") r.Prefix("other") - r.RequestURI("/test?foo=b&a=b") + r.RequestURI("/test?foo=b&a=b&c=1&c=2") if r.path != "/test" { t.Errorf("path is wrong: %#v", r) } - if !api.Semantic.DeepDerivative(r.params, url.Values{"a": []string{"b"}, "foo": []string{"b"}}) { + if !api.Semantic.DeepDerivative(r.params, url.Values{"a": []string{"b"}, "foo": []string{"b"}, "c": []string{"1", "2"}}) { t.Errorf("should have set a param: 
%#v", r) } } @@ -443,6 +451,122 @@ func TestRequestStream(t *testing.T) { } } +type fakeUpgradeConnection struct{} + +func (c *fakeUpgradeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) { + return nil, nil +} +func (c *fakeUpgradeConnection) Close() error { + return nil +} +func (c *fakeUpgradeConnection) CloseChan() <-chan bool { + return make(chan bool) +} +func (c *fakeUpgradeConnection) SetIdleTimeout(timeout time.Duration) { +} + +type fakeUpgradeRoundTripper struct { + req *http.Request + conn httpstream.Connection +} + +func (f *fakeUpgradeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + f.req = req + b := []byte{} + body := ioutil.NopCloser(bytes.NewReader(b)) + resp := &http.Response{ + StatusCode: 101, + Body: body, + } + return resp, nil +} + +func (f *fakeUpgradeRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + return f.conn, nil +} + +func TestRequestUpgrade(t *testing.T) { + uri, _ := url.Parse("http://localhost/") + testCases := []struct { + Request *Request + Config *Config + RoundTripper *fakeUpgradeRoundTripper + Err bool + AuthBasicHeader bool + AuthBearerHeader bool + }{ + { + Request: &Request{err: errors.New("bail")}, + Err: true, + }, + { + Request: &Request{}, + Config: &Config{ + TLSClientConfig: TLSClientConfig{ + CAFile: "foo", + }, + Insecure: true, + }, + Err: true, + }, + { + Request: &Request{}, + Config: &Config{ + Username: "u", + Password: "p", + BearerToken: "b", + }, + Err: true, + }, + { + Request: NewRequest(nil, "", uri, testapi.Codec(), true, true), + Config: &Config{ + Username: "u", + Password: "p", + }, + AuthBasicHeader: true, + Err: false, + }, + { + Request: NewRequest(nil, "", uri, testapi.Codec(), true, true), + Config: &Config{ + BearerToken: "b", + }, + AuthBearerHeader: true, + Err: false, + }, + } + for i, testCase := range testCases { + r := testCase.Request + rt := &fakeUpgradeRoundTripper{} + expectedConn := &fakeUpgradeConnection{} + conn, err := r.Upgrade(testCase.Config, func(config *tls.Config) httpstream.UpgradeRoundTripper { + rt.conn = expectedConn + return rt + }) + _ = conn + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, r.err) + } + if testCase.Err { + continue + } + + if testCase.AuthBasicHeader && !strings.Contains(rt.req.Header.Get("Authorization"), "Basic") { + t.Errorf("%d: expected basic auth header, got: %s", rt.req.Header.Get("Authorization")) + } + + if testCase.AuthBearerHeader && !strings.Contains(rt.req.Header.Get("Authorization"), "Bearer") { + t.Errorf("%d: expected bearer auth header, got: %s", rt.req.Header.Get("Authorization")) + } + + if e, a := expectedConn, conn; e != a { + t.Errorf("%d: conn: expected %#v, got %#v", i, e, a) + } + } +} + func TestRequestDo(t *testing.T) { testCases := []struct { Request *Request diff --git a/pkg/conversion/deep_equal.go b/pkg/conversion/deep_equal.go index 617f3d793ca..2672cad6722 100644 --- a/pkg/conversion/deep_equal.go +++ b/pkg/conversion/deep_equal.go @@ -355,7 +355,7 @@ func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool } // DeepDerivative is similar to DeepEqual except that unset fields in a1 are -// ignored (not compared). This allows we to focus on the fields that matter to +// ignored (not compared). This allows us to focus on the fields that matter to // the semantic comparison. // // The unset fields include a nil pointer and an empty string. 
diff --git a/pkg/httplog/log.go b/pkg/httplog/log.go index f66605bc324..ec26de718e9 100644 --- a/pkg/httplog/log.go +++ b/pkg/httplog/log.go @@ -17,7 +17,9 @@ limitations under the License. package httplog import ( + "bufio" "fmt" + "net" "net/http" "runtime" "time" @@ -46,6 +48,10 @@ type logger interface { // Add a layer on top of ResponseWriter, so we can track latency and error // message sources. +// +// TODO now that we're using go-restful, we shouldn't need to be wrapping +// the http.ResponseWriter. We can recover panics from go-restful, and +// the logging value is questionable. type respLogger struct { status int statusStack string @@ -68,7 +74,7 @@ func (passthroughLogger) Addf(format string, data ...interface{}) { // DefaultStacktracePred is the default implementation of StacktracePred. func DefaultStacktracePred(status int) bool { - return status < http.StatusOK || status >= http.StatusBadRequest + return (status < http.StatusOK || status >= http.StatusBadRequest) && status != http.StatusSwitchingProtocols } // NewLogged turns a normal response writer into a logged response writer. @@ -186,3 +192,8 @@ func (rl *respLogger) WriteHeader(status int) { } rl.w.WriteHeader(status) } + +// Hijack implements http.Hijacker. +func (rl *respLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return rl.w.(http.Hijacker).Hijack() +} diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 51b7cdb135a..29f6ea29d33 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -183,7 +183,7 @@ func (f *Factory) BindFlags(flags *pflag.FlagSet) { } // NewKubectlCommand creates the `kubectl` command and its nested children. -func (f *Factory) NewKubectlCommand(out io.Writer) *cobra.Command { +func (f *Factory) NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { // Parent command to which all subcommands are added. cmds := &cobra.Command{ Use: "kubectl", @@ -211,6 +211,9 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, cmds.AddCommand(f.NewCmdRollingUpdate(out)) cmds.AddCommand(f.NewCmdResize(out)) + cmds.AddCommand(f.NewCmdExec(in, out, err)) + cmds.AddCommand(f.NewCmdPortForward()) + cmds.AddCommand(f.NewCmdRunContainer(out)) cmds.AddCommand(f.NewCmdStop(out)) cmds.AddCommand(f.NewCmdExposeService(out)) diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go new file mode 100644 index 00000000000..58ccab6f0ab --- /dev/null +++ b/pkg/kubectl/cmd/exec.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/remotecommand" + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" + "github.com/docker/docker/pkg/term" + "github.com/golang/glog" + "github.com/spf13/cobra" +) + +func (f *Factory) NewCmdExec(cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command { + flags := &struct { + pod string + container string + stdin bool + tty bool + }{} + + cmd := &cobra.Command{ + Use: "exec -p -c -- []", + Short: "Execute a command in a container.", + Long: `Execute a command in a container. +Examples: + $ kubectl exec -p 123456-7890 -c ruby-container date + + + $ kubectl exec -p 123456-7890 -c ruby-container -i -t -- bash -il + is required for exec") + } + + if len(args) < 1 { + usageError(cmd, " is required for exec") + } + + namespace, err := f.DefaultNamespace(cmd) + checkErr(err) + + client, err := f.Client(cmd) + checkErr(err) + + pod, err := client.Pods(namespace).Get(flags.pod) + checkErr(err) + + if pod.Status.Phase != api.PodRunning { + glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase) + } + + if len(flags.container) == 0 { + flags.container = pod.Spec.Containers[0].Name + } + + var stdin io.Reader + if util.GetFlagBool(cmd, "stdin") { + stdin = cmdIn + if flags.tty { + if file, ok := cmdIn.(*os.File); ok { + inFd := file.Fd() + if term.IsTerminal(inFd) { + oldState, err := term.SetRawTerminal(inFd) + if err != nil { + glog.Fatal(err) + } + // this handles a clean exit, where the command finished + defer term.RestoreTerminal(inFd, oldState) + + // SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens + // for SIGINT and restores the terminal before exiting) + + // this handles SIGTERM + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGTERM) + go func() { + <-sigChan + term.RestoreTerminal(inFd, oldState) + os.Exit(0) + }() + } else { + glog.Warning("Stdin is not a terminal") + } + } else { + flags.tty = false + glog.Warning("Unable to use a TTY") + } + } + } + + config, err := f.ClientConfig(cmd) + checkErr(err) + + req := client.RESTClient.Get(). + Prefix("proxy"). + Resource("minions"). + Name(pod.Status.Host). + Suffix("exec", namespace, flags.pod, flags.container) + + e := remotecommand.New(req, config, args, stdin, cmdOut, cmdErr, flags.tty) + err = e.Execute() + checkErr(err) + }, + } + cmd.Flags().StringVarP(&flags.pod, "pod", "p", "", "Pod name") + // TODO support UID + cmd.Flags().StringVarP(&flags.container, "container", "c", "", "Container name") + cmd.Flags().BoolVarP(&flags.stdin, "stdin", "i", false, "Pass stdin to the container") + cmd.Flags().BoolVarP(&flags.tty, "tty", "t", false, "Stdin is a TTY") + return cmd +} diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go new file mode 100644 index 00000000000..7104e232bee --- /dev/null +++ b/pkg/kubectl/cmd/portforward.go @@ -0,0 +1,104 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "os" + "os/signal" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/portforward" + "github.com/golang/glog" + "github.com/spf13/cobra" +) + +func (f *Factory) NewCmdPortForward() *cobra.Command { + flags := &struct { + pod string + container string + }{} + + cmd := &cobra.Command{ + Use: "port-forward -p [:] [...]", + Short: "Forward 1 or more local ports to a pod.", + Long: `Forward 1 or more local ports to a pod. +Examples: + $ kubectl port-forward -p mypod 5000 6000 + + + $ kubectl port-forward -p mypod 8888:5000 + + + $ kubectl port-forward -p mypod :5000 + + + $ kubectl port-forward -p mypod 0:5000 + + `, + Run: func(cmd *cobra.Command, args []string) { + if len(flags.pod) == 0 { + usageError(cmd, " is required for exec") + } + + if len(args) < 1 { + usageError(cmd, "at least 1 is required for port-forward") + } + + namespace, err := f.DefaultNamespace(cmd) + checkErr(err) + + client, err := f.Client(cmd) + checkErr(err) + + pod, err := client.Pods(namespace).Get(flags.pod) + checkErr(err) + + if pod.Status.Phase != api.PodRunning { + glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase) + } + + config, err := f.ClientConfig(cmd) + checkErr(err) + + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + defer signal.Stop(signals) + + stopCh := make(chan struct{}, 1) + go func() { + <-signals + close(stopCh) + }() + + req := client.RESTClient.Get(). + Prefix("proxy"). + Resource("minions"). + Name(pod.Status.Host). + Suffix("portForward", namespace, flags.pod) + + pf, err := portforward.New(req, config, args, stopCh) + checkErr(err) + + err = pf.ForwardPorts() + checkErr(err) + }, + } + cmd.Flags().StringVarP(&flags.pod, "pod", "p", "", "Pod name") + // TODO support UID + return cmd +} diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go index 51d5e911e9a..e2e3e00fc91 100644 --- a/pkg/kubelet/dockertools/docker.go +++ b/pkg/kubelet/dockertools/docker.go @@ -198,6 +198,127 @@ func (d *dockerContainerCommandRunner) RunInContainer(containerID string, cmd [] return buf.Bytes(), <-errChan } +// ExecInContainer uses nsenter to run the command inside the container identified by containerID. +// +// TODO: +// - match cgroups of container +// - should we support `docker exec`? +// - should we support nsenter in a container, running with elevated privs and --pid=host? +func (d *dockerContainerCommandRunner) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + container, err := d.client.InspectContainer(containerId) + if err != nil { + return err + } + + if !container.State.Running { + return fmt.Errorf("container not running (%s)", container) + } + + containerPid := container.State.Pid + + // TODO what if the container doesn't have `env`??? + args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"} + args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname)) + args = append(args, container.Config.Env...) + args = append(args, cmd...) + glog.Infof("ARGS %#v", args) + command := exec.Command("nsenter", args...) 
+ // TODO use exec.LookPath + if tty { + p, err := StartPty(command) + if err != nil { + return err + } + defer p.Close() + + // make sure to close the stdout stream + defer stdout.Close() + + if stdin != nil { + go io.Copy(p, stdin) + } + + if stdout != nil { + go io.Copy(stdout, p) + } + + return command.Wait() + } else { + cp := func(dst io.WriteCloser, src io.Reader, closeDst bool) { + defer func() { + if closeDst { + dst.Close() + } + }() + io.Copy(dst, src) + } + if stdin != nil { + inPipe, err := command.StdinPipe() + if err != nil { + return err + } + go func() { + cp(inPipe, stdin, false) + inPipe.Close() + }() + } + + if stdout != nil { + outPipe, err := command.StdoutPipe() + if err != nil { + return err + } + go cp(stdout, outPipe, true) + } + + if stderr != nil { + errPipe, err := command.StderrPipe() + if err != nil { + return err + } + go cp(stderr, errPipe, true) + } + + return command.Run() + } +} + +// PortForward executes socat in the pod's network namespace and copies +// data between stream (representing the user's local connection on their +// computer) and the specified port in the container. +// +// TODO: +// - match cgroups of container +// - should we support nsenter + socat on the host? (current impl) +// - should we support nsenter + socat in a container, running with elevated privs and --pid=host? +func (d *dockerContainerCommandRunner) PortForward(podInfraContainerID string, port uint16, stream io.ReadWriteCloser) error { + container, err := d.client.InspectContainer(podInfraContainerID) + if err != nil { + return err + } + + if !container.State.Running { + return fmt.Errorf("container not running (%s)", container) + } + + containerPid := container.State.Pid + // TODO use exec.LookPath for socat / what if the host doesn't have it??? + args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port)} + // TODO use exec.LookPath + command := exec.Command("nsenter", args...) + in, err := command.StdinPipe() + if err != nil { + return err + } + out, err := command.StdoutPipe() + if err != nil { + return err + } + go io.Copy(in, stream) + go io.Copy(stream, out) + return command.Run() +} + // NewDockerContainerCommandRunner creates a ContainerCommandRunner which uses nsinit to run a command // inside a container. func NewDockerContainerCommandRunner(client DockerInterface) ContainerCommandRunner { @@ -690,4 +811,6 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface { type ContainerCommandRunner interface { RunInContainer(containerID string, cmd []string) ([]byte, error) GetDockerServerVersion() ([]uint, error) + ExecInContainer(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error + PortForward(podInfraContainerID string, port uint16, stream io.ReadWriteCloser) error } diff --git a/pkg/kubelet/dockertools/pty_linux.go b/pkg/kubelet/dockertools/pty_linux.go new file mode 100644 index 00000000000..59e1c0a4662 --- /dev/null +++ b/pkg/kubelet/dockertools/pty_linux.go @@ -0,0 +1,30 @@ +// +build linux + +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "os" + "os/exec" + + "github.com/kr/pty" +) + +func StartPty(c *exec.Cmd) (*os.File, error) { + return pty.Start(c) +} diff --git a/pkg/kubelet/dockertools/pty_unsupported.go b/pkg/kubelet/dockertools/pty_unsupported.go new file mode 100644 index 00000000000..da89938d843 --- /dev/null +++ b/pkg/kubelet/dockertools/pty_unsupported.go @@ -0,0 +1,28 @@ +// +build !linux + +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "os" + "os/exec" +) + +func StartPty(c *exec.Cmd) (pty *os.File, err error) { + return nil, nil +} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 0fa3bc3c85a..99495dd7bfb 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -86,7 +86,8 @@ func NewMainKubelet( clusterDomain string, clusterDNS net.IP, masterServiceNamespace string, - volumePlugins []volume.Plugin) (*Kubelet, error) { + volumePlugins []volume.Plugin, + streamingConnectionIdleTimeout time.Duration) (*Kubelet, error) { if rootDirectory == "" { return nil, fmt.Errorf("invalid root directory %q", rootDirectory) } @@ -104,28 +105,29 @@ func NewMainKubelet( serviceLister := &cache.StoreToServiceLister{serviceStore} klet := &Kubelet{ - hostname: hostname, - dockerClient: dockerClient, - etcdClient: etcdClient, - kubeClient: kubeClient, - rootDirectory: rootDirectory, - resyncInterval: resyncInterval, - podInfraContainerImage: podInfraContainerImage, - podWorkers: newPodWorkers(), - dockerIDToRef: map[dockertools.DockerID]*api.ObjectReference{}, - runner: dockertools.NewDockerContainerCommandRunner(dockerClient), - httpClient: &http.Client{}, - pullQPS: pullQPS, - pullBurst: pullBurst, - minimumGCAge: minimumGCAge, - maxContainerCount: maxContainerCount, - sourceReady: sourceReady, - clusterDomain: clusterDomain, - clusterDNS: clusterDNS, - serviceLister: serviceLister, - masterServiceNamespace: masterServiceNamespace, - prober: newProbeHolder(), - readiness: newReadinessStates(), + hostname: hostname, + dockerClient: dockerClient, + etcdClient: etcdClient, + kubeClient: kubeClient, + rootDirectory: rootDirectory, + resyncInterval: resyncInterval, + podInfraContainerImage: podInfraContainerImage, + podWorkers: newPodWorkers(), + dockerIDToRef: map[dockertools.DockerID]*api.ObjectReference{}, + runner: dockertools.NewDockerContainerCommandRunner(dockerClient), + httpClient: &http.Client{}, + pullQPS: pullQPS, + pullBurst: pullBurst, + minimumGCAge: minimumGCAge, + maxContainerCount: maxContainerCount, + sourceReady: sourceReady, + clusterDomain: clusterDomain, + clusterDNS: clusterDNS, 
+ serviceLister: serviceLister, + masterServiceNamespace: masterServiceNamespace, + prober: newProbeHolder(), + readiness: newReadinessStates(), + streamingConnectionIdleTimeout: streamingConnectionIdleTimeout, } if err := klet.setupDataDirs(); err != nil { @@ -207,6 +209,10 @@ type Kubelet struct { prober probeHolder // container readiness state holder readiness *readinessStates + + // how long to keep idle streaming command execution/port forwarding + // connections open before terminating them + streamingConnectionIdleTimeout time.Duration } // getRootDir returns the full path to the directory under which kubelet can @@ -1686,6 +1692,40 @@ func (kl *Kubelet) RunInContainer(podFullName string, uid types.UID, container s return kl.runner.RunInContainer(dockerContainer.ID, cmd) } +// ExecInContainer executes a command in a container, connecting the supplied +// stdin/stdout/stderr to the command's IO streams. +func (kl *Kubelet) ExecInContainer(podFullName string, uid types.UID, container string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + if kl.runner == nil { + return fmt.Errorf("no runner specified.") + } + dockerContainers, err := dockertools.GetKubeletDockerContainers(kl.dockerClient, false) + if err != nil { + return err + } + dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, container) + if !found { + return fmt.Errorf("container not found (%q)", container) + } + return kl.runner.ExecInContainer(dockerContainer.ID, cmd, stdin, stdout, stderr, tty) +} + +// PortForward connects to the pod's port and copies data between the port +// and the stream. +func (kl *Kubelet) PortForward(podFullName string, uid types.UID, port uint16, stream io.ReadWriteCloser) error { + if kl.runner == nil { + return fmt.Errorf("no runner specified.") + } + dockerContainers, err := dockertools.GetKubeletDockerContainers(kl.dockerClient, false) + if err != nil { + return err + } + podInfraContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, dockertools.PodInfraContainerName) + if !found { + return fmt.Errorf("Unable to find pod infra container for pod %s, uid %v", podFullName, uid) + } + return kl.runner.PortForward(podInfraContainer.ID, port, stream) +} + // BirthCry sends an event that the kubelet has started up. func (kl *Kubelet) BirthCry() { // Make an event that kubelet restarted. @@ -1699,3 +1739,7 @@ func (kl *Kubelet) BirthCry() { } record.Eventf(ref, "starting", "Starting kubelet.") } + +func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration { + return kl.streamingConnectionIdleTimeout +} diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 9b0682ca2a0..b1e45591e3e 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -17,7 +17,9 @@ limitations under the License. 
package kubelet import ( + "bytes" "fmt" + "io" "io/ioutil" "net/http" "os" @@ -1486,9 +1488,15 @@ func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) { } type fakeContainerCommandRunner struct { - Cmd []string - ID string - E error + Cmd []string + ID string + E error + Stdin io.Reader + Stdout io.WriteCloser + Stderr io.WriteCloser + TTY bool + Port uint16 + Stream io.ReadWriteCloser } func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]byte, error) { @@ -1501,6 +1509,23 @@ func (f *fakeContainerCommandRunner) GetDockerServerVersion() ([]uint, error) { return nil, nil } +func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + f.Cmd = cmd + f.ID = id + f.Stdin = in + f.Stdout = out + f.Stderr = err + f.TTY = tty + return f.E +} + +func (f *fakeContainerCommandRunner) PortForward(podInfraContainerID string, port uint16, stream io.ReadWriteCloser) error { + f.ID = podInfraContainerID + f.Port = port + f.Stream = stream + return nil +} + func TestRunInContainerNoSuchPod(t *testing.T) { fakeCommandRunner := fakeContainerCommandRunner{} kubelet, fakeDocker := newTestKubelet(t) @@ -2805,5 +2830,252 @@ func TestGetPodReadyCondition(t *testing.T) { t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition) } } - +} + +func TestExecInContainerNoSuchPod(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + fakeDocker.ContainerList = []docker.APIContainers{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + containerName := "containerFoo" + err := kubelet.ExecInContainer( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + containerName, + []string{"ls"}, + nil, + nil, + nil, + false, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if fakeCommandRunner.ID != "" { + t.Fatal("unexpected invocation of runner.ExecInContainer") + } +} + +func TestExecInContainerNoSuchContainer(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + containerID := "containerFoo" + + fakeDocker.ContainerList = []docker.APIContainers{ + { + ID: "notfound", + Names: []string{"/k8s_notfound_" + podName + "." 
+ podNamespace + ".test_12345678_42"}, + }, + } + + err := kubelet.ExecInContainer( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + Annotations: map[string]string{ConfigSourceAnnotationKey: "test"}, + }}), + "", + containerID, + []string{"ls"}, + nil, + nil, + nil, + false, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if fakeCommandRunner.ID != "" { + t.Fatal("unexpected invocation of runner.ExecInContainer") + } +} + +type fakeReadWriteCloser struct{} + +func (f *fakeReadWriteCloser) Write(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Read(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Close() error { + return nil +} + +func TestExecInContainer(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + containerID := "containerFoo" + command := []string{"ls"} + stdin := &bytes.Buffer{} + stdout := &fakeReadWriteCloser{} + stderr := &fakeReadWriteCloser{} + tty := true + + fakeDocker.ContainerList = []docker.APIContainers{ + { + ID: containerID, + Names: []string{"/k8s_" + containerID + "_" + podName + "." + podNamespace + ".test_12345678_42"}, + }, + } + + err := kubelet.ExecInContainer( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + Annotations: map[string]string{ConfigSourceAnnotationKey: "test"}, + }}), + "", + containerID, + []string{"ls"}, + stdin, + stdout, + stderr, + tty, + ) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if e, a := containerID, fakeCommandRunner.ID; e != a { + t.Fatalf("container id: expected %s, got %s", e, a) + } + if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) { + t.Fatalf("command: expected '%v', got '%v'", e, a) + } + if e, a := stdin, fakeCommandRunner.Stdin; e != a { + t.Fatalf("stdin: expected %#v, got %#v", e, a) + } + if e, a := stdout, fakeCommandRunner.Stdout; e != a { + t.Fatalf("stdout: expected %#v, got %#v", e, a) + } + if e, a := stderr, fakeCommandRunner.Stderr; e != a { + t.Fatalf("stderr: expected %#v, got %#v", e, a) + } + if e, a := tty, fakeCommandRunner.TTY; e != a { + t.Fatalf("tty: expected %t, got %t", e, a) + } +} + +func TestPortForwardNoSuchPod(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + fakeDocker.ContainerList = []docker.APIContainers{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + var port uint16 = 5000 + + err := kubelet.PortForward( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + port, + nil, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if fakeCommandRunner.ID != "" { + t.Fatal("unexpected invocation of runner.PortForward") + } +} + +func TestPortForwardNoSuchContainer(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + var port uint16 = 5000 + + fakeDocker.ContainerList = []docker.APIContainers{ + { + ID: "notfound", + Names: []string{"/k8s_notfound_" + podName + "." 
+ podNamespace + ".test_12345678_42"}, + }, + } + + err := kubelet.PortForward( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + Annotations: map[string]string{ConfigSourceAnnotationKey: "test"}, + }}), + "", + port, + nil, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if fakeCommandRunner.ID != "" { + t.Fatal("unexpected invocation of runner.PortForward") + } +} + +func TestPortForward(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet, fakeDocker := newTestKubelet(t) + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "etcd" + containerID := "containerFoo" + var port uint16 = 5000 + stream := &fakeReadWriteCloser{} + + infraContainerID := "infra" + kubelet.podInfraContainerImage = "POD" + + fakeDocker.ContainerList = []docker.APIContainers{ + { + ID: infraContainerID, + Names: []string{"/k8s_" + kubelet.podInfraContainerImage + "_" + podName + "." + podNamespace + ".test_12345678_42"}, + }, + { + ID: containerID, + Names: []string{"/k8s_" + containerID + "_" + podName + "." + podNamespace + ".test_12345678_42"}, + }, + } + + err := kubelet.PortForward( + GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + Annotations: map[string]string{ConfigSourceAnnotationKey: "test"}, + }}), + "", + port, + stream, + ) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if e, a := infraContainerID, fakeCommandRunner.ID; e != a { + t.Fatalf("container id: expected %s, got %s", e, a) + } + if e, a := port, fakeCommandRunner.Port; e != a { + t.Fatalf("port: expected %v, got %v", e, a) + } + if e, a := stream, fakeCommandRunner.Stream; e != a { + t.Fatalf("stream: expected %v, got %v", e, a) + } } diff --git a/pkg/kubelet/server.go b/pkg/kubelet/server.go index 876a1d7278c..22a227269ac 100644 --- a/pkg/kubelet/server.go +++ b/pkg/kubelet/server.go @@ -27,6 +27,7 @@ import ( "path" "strconv" "strings" + "sync" "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" @@ -34,6 +35,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/httplog" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" "github.com/golang/glog" "github.com/google/cadvisor/info" ) @@ -69,8 +72,11 @@ type HostInterface interface { GetPodByName(namespace, name string) (*api.BoundPod, bool) GetPodStatus(name string, uid types.UID) (api.PodStatus, error) RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) + ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error GetKubeletContainerLogs(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error ServeLogs(w http.ResponseWriter, req *http.Request) + PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error + StreamingConnectionIdleTimeout() time.Duration } // NewServer initializes and configures a kubelet.Server object to handle HTTP requests. 
@@ -99,6 +105,8 @@ func (s *Server) InstallDefaultHandlers() { // InstallDeguggingHandlers registers the HTTP request patterns that serve logs or run commands/containers func (s *Server) InstallDebuggingHandlers() { s.mux.HandleFunc("/run/", s.handleRun) + s.mux.HandleFunc("/exec/", s.handleExec) + s.mux.HandleFunc("/portForward/", s.handlePortForward) s.mux.HandleFunc("/logs/", s.handleLogs) s.mux.HandleFunc("/containerLogs/", s.handleContainerLogs) @@ -301,6 +309,28 @@ func (s *Server) handleSpec(w http.ResponseWriter, req *http.Request) { w.Write(data) } +func parseContainerCoordinates(path string) (namespace, pod string, uid types.UID, container string, err error) { + parts := strings.Split(path, "/") + + if len(parts) == 5 { + namespace = parts[2] + pod = parts[3] + container = parts[4] + return + } + + if len(parts) == 6 { + namespace = parts[2] + pod = parts[3] + uid = types.UID(parts[4]) + container = parts[5] + return + } + + err = fmt.Errorf("Unexpected path %s. Expected /.../.../// or /.../...////", path) + return +} + // handleRun handles requests to run a command inside a container. func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) { u, err := url.ParseRequestURI(req.RequestURI) @@ -308,20 +338,9 @@ func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) { s.error(w, err) return } - parts := strings.Split(u.Path, "/") - var podNamespace, podID, container string - var uid types.UID - if len(parts) == 5 { - podNamespace = parts[2] - podID = parts[3] - container = parts[4] - } else if len(parts) == 6 { - podNamespace = parts[2] - podID = parts[3] - uid = types.UID(parts[4]) - container = parts[5] - } else { - http.Error(w, "Unexpected path for command running", http.StatusBadRequest) + podNamespace, podID, uid, container, err := parseContainerCoordinates(u.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) return } pod, ok := s.host.GetPodByName(podNamespace, podID) @@ -339,6 +358,227 @@ func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) { w.Write(data) } +// handleExec handles requests to run a command inside a container. +func (s *Server) handleExec(w http.ResponseWriter, req *http.Request) { + u, err := url.ParseRequestURI(req.RequestURI) + if err != nil { + s.error(w, err) + return + } + podNamespace, podID, uid, container, err := parseContainerCoordinates(u.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + http.Error(w, "Pod does not exist", http.StatusNotFound) + return + } + + req.ParseForm() + // start at 1 for error stream + expectedStreams := 1 + if req.FormValue(api.ExecStdinParam) == "1" { + expectedStreams++ + } + if req.FormValue(api.ExecStdoutParam) == "1" { + expectedStreams++ + } + tty := req.FormValue(api.ExecTTYParam) == "1" + if !tty && req.FormValue(api.ExecStderrParam) == "1" { + expectedStreams++ + } + + if expectedStreams == 1 { + http.Error(w, "You must specify at least 1 of stdin, stdout, stderr", http.StatusBadRequest) + return + } + + streamCh := make(chan httpstream.Stream) + + upgrader := spdy.NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream) error { + streamCh <- stream + return nil + }) + // from this point on, we can no longer call methods on w + if conn == nil { + // The upgrader is responsible for notifying the client of any errors that + // occurred during upgrading. 
All we can do is return here at this point + // if we weren't successful in upgrading. + return + } + defer conn.Close() + + conn.SetIdleTimeout(s.host.StreamingConnectionIdleTimeout()) + + // TODO find a good default timeout value + // TODO make it configurable? + expired := time.NewTimer(2 * time.Second) + + var errorStream, stdinStream, stdoutStream, stderrStream httpstream.Stream + receivedStreams := 0 +WaitForStreams: + for { + select { + case stream := <-streamCh: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + errorStream = stream + defer errorStream.Reset() + receivedStreams++ + case api.StreamTypeStdin: + stdinStream = stream + receivedStreams++ + case api.StreamTypeStdout: + stdoutStream = stream + receivedStreams++ + case api.StreamTypeStderr: + stderrStream = stream + receivedStreams++ + default: + glog.Errorf("Unexpected stream type: '%s'", streamType) + } + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired.C: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? + glog.Error("Timed out waiting for client to create streams") + return + } + } + + if stdinStream != nil { + // close our half of the input stream, since we won't be writing to it + stdinStream.Close() + } + + err = s.host.ExecInContainer(GetPodFullName(pod), uid, container, u.Query()[api.ExecCommandParamm], stdinStream, stdoutStream, stderrStream, tty) + if err != nil { + msg := fmt.Sprintf("Error executing command in container: %v", err) + glog.Error(msg) + errorStream.Write([]byte(msg)) + } +} + +func parsePodCoordinates(path string) (namespace, pod string, uid types.UID, err error) { + parts := strings.Split(path, "/") + + if len(parts) == 4 { + namespace = parts[2] + pod = parts[3] + return + } + + if len(parts) == 5 { + namespace = parts[2] + pod = parts[3] + uid = types.UID(parts[4]) + return + } + + err = fmt.Errorf("Unexpected path %s. 
Expected /.../...// or /.../...///", path) + return +} + +func (s *Server) handlePortForward(w http.ResponseWriter, req *http.Request) { + u, err := url.ParseRequestURI(req.RequestURI) + if err != nil { + s.error(w, err) + return + } + podNamespace, podID, uid, err := parsePodCoordinates(u.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + http.Error(w, "Pod does not exist", http.StatusNotFound) + return + } + + streamChan := make(chan httpstream.Stream, 1) + upgrader := spdy.NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream) error { + portString := stream.Headers().Get(api.PortHeader) + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return fmt.Errorf("Unable to parse '%s' as a port: %v", portString, err) + } + if port < 1 { + return fmt.Errorf("Port '%d' must be greater than 0", port) + } + streamChan <- stream + return nil + }) + if conn == nil { + return + } + defer conn.Close() + conn.SetIdleTimeout(s.host.StreamingConnectionIdleTimeout()) + + var dataStreamLock sync.Mutex + dataStreamChans := make(map[string]chan httpstream.Stream) + +Loop: + for { + select { + case <-conn.CloseChan(): + break Loop + case stream := <-streamChan: + streamType := stream.Headers().Get(api.StreamType) + port := stream.Headers().Get(api.PortHeader) + dataStreamLock.Lock() + switch streamType { + case "error": + ch := make(chan httpstream.Stream) + dataStreamChans[port] = ch + go waitForPortForwardDataStreamAndRun(GetPodFullName(pod), uid, stream, ch, s.host) + case "data": + ch, ok := dataStreamChans[port] + if ok { + ch <- stream + delete(dataStreamChans, port) + } else { + glog.Errorf("Unable to locate data stream channel for port %s", port) + } + default: + glog.Errorf("streamType header must be 'error' or 'data', got: '%s'", streamType) + stream.Reset() + } + dataStreamLock.Unlock() + } + } +} + +func waitForPortForwardDataStreamAndRun(pod string, uid types.UID, errorStream httpstream.Stream, dataStreamChan chan httpstream.Stream, host HostInterface) { + defer errorStream.Reset() + + var dataStream httpstream.Stream + + select { + case dataStream = <-dataStreamChan: + case <-time.After(1 * time.Second): + errorStream.Write([]byte("Timed out waiting for data stream")) + //TODO delete from dataStreamChans[port] + return + } + + portString := dataStream.Headers().Get(api.PortHeader) + port, _ := strconv.ParseUint(portString, 10, 16) + err := host.PortForward(pod, uid, uint16(port), dataStream) + if err != nil { + msg := fmt.Errorf("Error forwarding port %d to pod %s, uid %v: %v", port, pod, uid, err) + glog.Error(msg) + errorStream.Write([]byte(msg.Error())) + } +} + // ServeHTTP responds to HTTP requests on the Kubelet. func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer httplog.NewLogged(req, &w).StacktraceWhen( @@ -347,6 +587,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { http.StatusMovedPermanently, http.StatusTemporaryRedirect, http.StatusNotFound, + http.StatusSwitchingProtocols, ), ).Log() s.mux.ServeHTTP(w, req) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 866ddeb14b9..0d35b789979 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -46,35 +46,36 @@ const defaultRootDir = "/var/lib/kubelet" // KubeletServer encapsulates all of the parameters necessary for starting up // a kubelet. 
These can either be set via command line or directly. type KubeletServer struct { - Config string - SyncFrequency time.Duration - FileCheckFrequency time.Duration - HTTPCheckFrequency time.Duration - ManifestURL string - EnableServer bool - Address util.IP - Port uint - HostnameOverride string - PodInfraContainerImage string - DockerEndpoint string - EtcdServerList util.StringList - EtcdConfigFile string - RootDirectory string - AllowPrivileged bool - RegistryPullQPS float64 - RegistryBurst int - RunOnce bool - EnableDebuggingHandlers bool - MinimumGCAge time.Duration - MaxContainerCount int - AuthPath string - CAdvisorPort uint - OOMScoreAdj int - APIServerList util.StringList - ClusterDomain string - MasterServiceNamespace string - ClusterDNS util.IP - ReallyCrashForTesting bool + Config string + SyncFrequency time.Duration + FileCheckFrequency time.Duration + HTTPCheckFrequency time.Duration + ManifestURL string + EnableServer bool + Address util.IP + Port uint + HostnameOverride string + PodInfraContainerImage string + DockerEndpoint string + EtcdServerList util.StringList + EtcdConfigFile string + RootDirectory string + AllowPrivileged bool + RegistryPullQPS float64 + RegistryBurst int + RunOnce bool + EnableDebuggingHandlers bool + MinimumGCAge time.Duration + MaxContainerCount int + AuthPath string + CAdvisorPort uint + OOMScoreAdj int + APIServerList util.StringList + ClusterDomain string + MasterServiceNamespace string + ClusterDNS util.IP + ReallyCrashForTesting bool + StreamingConnectionIdleTimeout time.Duration } // NewKubeletServer will create a new KubeletServer with default values. @@ -149,6 +150,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.MasterServiceNamespace, "master_service_namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods") fs.Var(&s.ClusterDNS, "cluster_dns", "IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers") fs.BoolVar(&s.ReallyCrashForTesting, "really_crash_for_testing", s.ReallyCrashForTesting, "If true, crash with panics more often.") + fs.DurationVar(&s.StreamingConnectionIdleTimeout, "streaming_connection_idle_timeout", 0, "Maximum time a streaming connection can be idle before the connection is automatically closed. Example: '5m'") } // Run runs the specified KubeletServer. This should never exit. 
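The new --streaming_connection_idle_timeout flag caps how long these upgraded connections may stay idle (for example, kubelet --streaming_connection_idle_timeout=5m; the zero default leaves them open indefinitely), and each exec/port-forward connection picks the value up via conn.SetIdleTimeout. On the wire, the handlePortForward handler added above expects, for each forwarded port, an "error" stream followed by a "data" stream, both tagged with the port header. A rough sketch of the client half, with assumed names and trimmed error handling:

package example

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strconv"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream"
)

// forwardOnePort mirrors what handlePortForward expects for a single port:
// first an "error" stream, then a "data" stream, both carrying the port header.
// Illustrative sketch, not part of the patch.
func forwardOnePort(conn httpstream.Connection, remotePort uint16, local io.ReadWriter) error {
	errHeaders := http.Header{}
	errHeaders.Set(api.StreamType, "error")
	errHeaders.Set(api.PortHeader, strconv.Itoa(int(remotePort)))
	errorStream, err := conn.CreateStream(errHeaders)
	if err != nil {
		return err
	}
	defer errorStream.Reset()

	dataHeaders := http.Header{}
	dataHeaders.Set(api.StreamType, "data")
	dataHeaders.Set(api.PortHeader, strconv.Itoa(int(remotePort)))
	dataStream, err := conn.CreateStream(dataHeaders)
	if err != nil {
		return err
	}
	defer dataStream.Reset()

	// Copy bytes both ways between the local connection and the pod's port,
	// which the kubelet bridges with nsenter + socat.
	go io.Copy(dataStream, local)
	go io.Copy(local, dataStream)

	// Anything the kubelet writes on the error stream is a failure message.
	msg, _ := ioutil.ReadAll(errorStream)
	if len(msg) > 0 {
		return fmt.Errorf("error forwarding port %d: %s", remotePort, msg)
	}
	return nil
}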
@@ -184,32 +186,33 @@ func (s *KubeletServer) Run(_ []string) error { credentialprovider.SetPreferredDockercfgPath(s.RootDirectory) kcfg := KubeletConfig{ - Address: s.Address, - AllowPrivileged: s.AllowPrivileged, - HostnameOverride: s.HostnameOverride, - RootDirectory: s.RootDirectory, - ConfigFile: s.Config, - ManifestURL: s.ManifestURL, - FileCheckFrequency: s.FileCheckFrequency, - HTTPCheckFrequency: s.HTTPCheckFrequency, - PodInfraContainerImage: s.PodInfraContainerImage, - SyncFrequency: s.SyncFrequency, - RegistryPullQPS: s.RegistryPullQPS, - RegistryBurst: s.RegistryBurst, - MinimumGCAge: s.MinimumGCAge, - MaxContainerCount: s.MaxContainerCount, - ClusterDomain: s.ClusterDomain, - ClusterDNS: s.ClusterDNS, - Runonce: s.RunOnce, - Port: s.Port, - CAdvisorPort: s.CAdvisorPort, - EnableServer: s.EnableServer, - EnableDebuggingHandlers: s.EnableDebuggingHandlers, - DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint), - KubeClient: client, - EtcdClient: kubelet.EtcdClientOrDie(s.EtcdServerList, s.EtcdConfigFile), - MasterServiceNamespace: s.MasterServiceNamespace, - VolumePlugins: ProbeVolumePlugins(), + Address: s.Address, + AllowPrivileged: s.AllowPrivileged, + HostnameOverride: s.HostnameOverride, + RootDirectory: s.RootDirectory, + ConfigFile: s.Config, + ManifestURL: s.ManifestURL, + FileCheckFrequency: s.FileCheckFrequency, + HTTPCheckFrequency: s.HTTPCheckFrequency, + PodInfraContainerImage: s.PodInfraContainerImage, + SyncFrequency: s.SyncFrequency, + RegistryPullQPS: s.RegistryPullQPS, + RegistryBurst: s.RegistryBurst, + MinimumGCAge: s.MinimumGCAge, + MaxContainerCount: s.MaxContainerCount, + ClusterDomain: s.ClusterDomain, + ClusterDNS: s.ClusterDNS, + Runonce: s.RunOnce, + Port: s.Port, + CAdvisorPort: s.CAdvisorPort, + EnableServer: s.EnableServer, + EnableDebuggingHandlers: s.EnableDebuggingHandlers, + DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint), + KubeClient: client, + EtcdClient: kubelet.EtcdClientOrDie(s.EtcdServerList, s.EtcdConfigFile), + MasterServiceNamespace: s.MasterServiceNamespace, + VolumePlugins: ProbeVolumePlugins(), + StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout, } RunKubelet(&kcfg) @@ -368,33 +371,34 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig { // KubeletConfig is all of the parameters necessary for running a kubelet. // TODO: This should probably be merged with KubeletServer. The extra object is a consequence of refactoring. 
type KubeletConfig struct { - EtcdClient tools.EtcdClient - KubeClient *client.Client - DockerClient dockertools.DockerInterface - CAdvisorPort uint - Address util.IP - AllowPrivileged bool - HostnameOverride string - RootDirectory string - ConfigFile string - ManifestURL string - FileCheckFrequency time.Duration - HTTPCheckFrequency time.Duration - Hostname string - PodInfraContainerImage string - SyncFrequency time.Duration - RegistryPullQPS float64 - RegistryBurst int - MinimumGCAge time.Duration - MaxContainerCount int - ClusterDomain string - ClusterDNS util.IP - EnableServer bool - EnableDebuggingHandlers bool - Port uint - Runonce bool - MasterServiceNamespace string - VolumePlugins []volume.Plugin + EtcdClient tools.EtcdClient + KubeClient *client.Client + DockerClient dockertools.DockerInterface + CAdvisorPort uint + Address util.IP + AllowPrivileged bool + HostnameOverride string + RootDirectory string + ConfigFile string + ManifestURL string + FileCheckFrequency time.Duration + HTTPCheckFrequency time.Duration + Hostname string + PodInfraContainerImage string + SyncFrequency time.Duration + RegistryPullQPS float64 + RegistryBurst int + MinimumGCAge time.Duration + MaxContainerCount int + ClusterDomain string + ClusterDNS util.IP + EnableServer bool + EnableDebuggingHandlers bool + Port uint + Runonce bool + MasterServiceNamespace string + VolumePlugins []volume.Plugin + StreamingConnectionIdleTimeout time.Duration } func createAndInitKubelet(kc *KubeletConfig, pc *config.PodConfig) (*kubelet.Kubelet, error) { @@ -417,7 +421,8 @@ func createAndInitKubelet(kc *KubeletConfig, pc *config.PodConfig) (*kubelet.Kub kc.ClusterDomain, net.IP(kc.ClusterDNS), kc.MasterServiceNamespace, - kc.VolumePlugins) + kc.VolumePlugins, + kc.StreamingConnectionIdleTimeout) if err != nil { return nil, err diff --git a/pkg/kubelet/server_test.go b/pkg/kubelet/server_test.go index 48625df2548..ef1822f6707 100644 --- a/pkg/kubelet/server_test.go +++ b/pkg/kubelet/server_test.go @@ -25,25 +25,32 @@ import ( "net/http/httptest" "net/http/httputil" "reflect" + "strconv" "strings" "testing" + "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" "github.com/google/cadvisor/info" ) type fakeKubelet struct { - podByNameFunc func(namespace, name string) (*api.BoundPod, bool) - statusFunc func(name string) (api.PodStatus, error) - containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) - rootInfoFunc func(query *info.ContainerInfoRequest) (*info.ContainerInfo, error) - machineInfoFunc func() (*info.MachineInfo, error) - boundPodsFunc func() ([]api.BoundPod, error) - logFunc func(w http.ResponseWriter, req *http.Request) - runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) - dockerVersionFunc func() ([]uint, error) - containerLogsFunc func(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error + podByNameFunc func(namespace, name string) (*api.BoundPod, bool) + statusFunc func(name string) (api.PodStatus, error) + containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) + rootInfoFunc func(query *info.ContainerInfoRequest) (*info.ContainerInfo, error) + machineInfoFunc func() 
(*info.MachineInfo, error) + boundPodsFunc func() ([]api.BoundPod, error) + logFunc func(w http.ResponseWriter, req *http.Request) + runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) + dockerVersionFunc func() ([]uint, error) + execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error + portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error + containerLogsFunc func(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error + streamingConnectionIdleTimeoutFunc func() time.Duration } func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool) { @@ -86,6 +93,18 @@ func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, contain return fk.runFunc(podFullName, uid, containerName, cmd) } +func (fk *fakeKubelet) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + return fk.execFunc(name, uid, container, cmd, in, out, err, tty) +} + +func (fk *fakeKubelet) PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error { + return fk.portForwardFunc(name, uid, port, stream) +} + +func (fk *fakeKubelet) StreamingConnectionIdleTimeout() time.Duration { + return fk.streamingConnectionIdleTimeoutFunc() +} + type serverTestFramework struct { updateChan chan interface{} updateReader *channelReader @@ -542,3 +561,503 @@ func TestContainerLogsWithFollow(t *testing.T) { t.Errorf("Expected: '%v', got: '%v'", output, result) } } + +func TestServeExecInContainerIdleTimeout(t *testing.T) { + fw := newServerTest() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 100 * time.Millisecond + } + + idleSuccess := make(chan struct{}) + + fw.fakeKubelet.execFunc = func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { + select { + case <-idleSuccess: + case <-time.After(150 * time.Millisecond): + t.Fatalf("execFunc timed out waiting for idle timeout") + } + return nil + } + + podNamespace := "other" + podName := "foo" + expectedContainerName := "baz" + + url := fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?c=ls&c=-a&" + api.ExecStdinParam + "=1" + + upgradeRoundTripper := spdy.NewRoundTripper(nil) + c := &http.Client{Transport: upgradeRoundTripper} + + resp, err := c.Get(url) + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + conn, err := upgradeRoundTripper.NewConnection(resp) + if err != nil { + t.Fatalf("Unexpected error creating streaming connection: %s", err) + } + if conn == nil { + t.Fatal("Unexpected nil connection") + } + defer conn.Close() + + h := http.Header{} + h.Set("type", "input") + stream, err := conn.CreateStream(h) + if err != nil { + t.Fatalf("error creating input stream: %v", err) + } + defer stream.Reset() + + select { + case <-conn.CloseChan(): + close(idleSuccess) + case <-time.After(150 * time.Millisecond): + t.Fatalf("Timed out waiting for connection closure due to idle timeout") + } +} + +func TestServeExecInContainer(t *testing.T) { + tests := []struct { + stdin bool + stdout bool + stderr bool + tty bool + responseStatusCode int + uid bool + }{ + {responseStatusCode: http.StatusBadRequest}, + {stdin: true, responseStatusCode: http.StatusSwitchingProtocols}, + 
{stdout: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdout: true, stderr: true, tty: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + } + + for i, test := range tests { + fw := newServerTest() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 0 + } + + podNamespace := "other" + podName := "foo" + expectedPodName := podName + "." + podNamespace + ".etcd" + expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647" + expectedContainerName := "baz" + expectedCommand := "ls -a" + expectedStdin := "stdin" + expectedStdout := "stdout" + expectedStderr := "stderr" + execFuncDone := make(chan struct{}) + clientStdoutReadDone := make(chan struct{}) + clientStderrReadDone := make(chan struct{}) + + fw.fakeKubelet.execFunc = func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { + defer close(execFuncDone) + if podFullName != expectedPodName { + t.Fatalf("%d: podFullName: expected %s, got %s", i, expectedPodName, podFullName) + } + if test.uid && string(uid) != expectedUid { + t.Fatalf("%d: uid: expected %v, got %v", i, expectedUid, uid) + } + if containerName != expectedContainerName { + t.Fatalf("%d: containerName: expected %s, got %s", i, expectedContainerName, containerName) + } + if strings.Join(cmd, " ") != expectedCommand { + t.Fatalf("%d: cmd: expected: %s, got %v", i, expectedCommand, cmd) + } + + if test.stdin { + if in == nil { + t.Fatalf("%d: stdin: expected non-nil", i) + } + b := make([]byte, 10) + n, err := in.Read(b) + if err != nil { + t.Fatalf("%d: error reading from stdin: %v", i, err) + } + if e, a := expectedStdin, string(b[0:n]); e != a { + t.Fatalf("%d: stdin: expected to read %v, got %v", i, e, a) + } + } else if in != nil { + t.Fatalf("%d: stdin: expected nil: %#v", i, in) + } + + if test.stdout { + if out == nil { + t.Fatalf("%d: stdout: expected non-nil", i) + } + _, err := out.Write([]byte(expectedStdout)) + if err != nil { + t.Fatalf("%d:, error writing to stdout: %v", i, err) + } + out.Close() + select { + case <-clientStdoutReadDone: + case <-time.After(10 * time.Millisecond): + t.Fatalf("%d: timed out waiting for client to read stdout", i) + } + } else if out != nil { + t.Fatalf("%d: stdout: expected nil: %#v", i, out) + } + + if tty { + if stderr != nil { + t.Fatalf("%d: tty set but received non-nil stderr: %v", i, stderr) + } + } else if test.stderr { + if stderr == nil { + t.Fatalf("%d: stderr: expected non-nil", i) + } + _, err := stderr.Write([]byte(expectedStderr)) + if err != nil { + t.Fatalf("%d:, error writing to stderr: %v", i, err) + } + stderr.Close() + select { + case <-clientStderrReadDone: + case <-time.After(10 * time.Millisecond): + t.Fatalf("%d: timed out waiting for client to read stderr", i) + } + } else if stderr != nil { + t.Fatalf("%d: stderr: expected nil: %#v", i, stderr) + } + + return nil + } + + var url string + if test.uid { + url = fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedUid + "/" + expectedContainerName + "?command=ls&command=-a" + } else { + url = fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?command=ls&command=-a" + } + if test.stdin { + url += "&" + 
api.ExecStdinParam + "=1"
+ }
+ if test.stdout {
+ url += "&" + api.ExecStdoutParam + "=1"
+ }
+ if test.stderr && !test.tty {
+ url += "&" + api.ExecStderrParam + "=1"
+ }
+ if test.tty {
+ url += "&" + api.ExecTTYParam + "=1"
+ }
+
+ var (
+ resp *http.Response
+ err error
+ upgradeRoundTripper httpstream.UpgradeRoundTripper
+ c *http.Client
+ )
+
+ if test.responseStatusCode != http.StatusSwitchingProtocols {
+ c = &http.Client{}
+ } else {
+ upgradeRoundTripper = spdy.NewRoundTripper(nil)
+ c = &http.Client{Transport: upgradeRoundTripper}
+ }
+
+ resp, err = c.Get(url)
+ if err != nil {
+ t.Fatalf("%d: Got error GETing: %v", i, err)
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%d: Error reading response body: %v", i, err)
+ }
+
+ if e, a := test.responseStatusCode, resp.StatusCode; e != a {
+ t.Fatalf("%d: response status: expected %v, got %v", i, e, a)
+ }
+
+ if test.responseStatusCode != http.StatusSwitchingProtocols {
+ continue
+ }
+
+ conn, err := upgradeRoundTripper.NewConnection(resp)
+ if err != nil {
+ t.Fatalf("Unexpected error creating streaming connection: %s", err)
+ }
+ if conn == nil {
+ t.Fatalf("%d: unexpected nil conn", i)
+ }
+ defer conn.Close()
+
+ h := http.Header{}
+ h.Set(api.StreamType, api.StreamTypeError)
+ errorStream, err := conn.CreateStream(h)
+ if err != nil {
+ t.Fatalf("%d: error creating error stream: %v", i, err)
+ }
+ defer errorStream.Reset()
+
+ if test.stdin {
+ h.Set(api.StreamType, api.StreamTypeStdin)
+ stream, err := conn.CreateStream(h)
+ if err != nil {
+ t.Fatalf("%d: error creating stdin stream: %v", i, err)
+ }
+ defer stream.Reset()
+ _, err = stream.Write([]byte(expectedStdin))
+ if err != nil {
+ t.Fatalf("%d: error writing to stdin stream: %v", i, err)
+ }
+ }
+
+ var stdoutStream httpstream.Stream
+ if test.stdout {
+ h.Set(api.StreamType, api.StreamTypeStdout)
+ stdoutStream, err = conn.CreateStream(h)
+ if err != nil {
+ t.Fatalf("%d: error creating stdout stream: %v", i, err)
+ }
+ defer stdoutStream.Reset()
+ }
+
+ var stderrStream httpstream.Stream
+ if test.stderr && !test.tty {
+ h.Set(api.StreamType, api.StreamTypeStderr)
+ stderrStream, err = conn.CreateStream(h)
+ if err != nil {
+ t.Fatalf("%d: error creating stderr stream: %v", i, err)
+ }
+ defer stderrStream.Reset()
+ }
+
+ if test.stdout {
+ output := make([]byte, 10)
+ n, err := stdoutStream.Read(output)
+ close(clientStdoutReadDone)
+ if err != nil {
+ t.Fatalf("%d: error reading from stdout stream: %v", i, err)
+ }
+ if e, a := expectedStdout, string(output[0:n]); e != a {
+ t.Fatalf("%d: stdout: expected '%v', got '%v'", i, e, a)
+ }
+ }
+
+ if test.stderr && !test.tty {
+ output := make([]byte, 10)
+ n, err := stderrStream.Read(output)
+ close(clientStderrReadDone)
+ if err != nil {
+ t.Fatalf("%d: error reading from stderr stream: %v", i, err)
+ }
+ if e, a := expectedStderr, string(output[0:n]); e != a {
+ t.Fatalf("%d: stderr: expected '%v', got '%v'", i, e, a)
+ }
+ }
+
+ select {
+ case <-execFuncDone:
+ case <-time.After(10 * time.Millisecond):
+ t.Fatalf("%d: timed out waiting for execFunc to complete", i)
+ }
+ }
+}
+
+func TestServePortForwardIdleTimeout(t *testing.T) {
+ fw := newServerTest()
+
+ fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration {
+ return 100 * time.Millisecond
+ }
+
+ idleSuccess := make(chan struct{})
+
+ fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error {
+ select {
+ case <-idleSuccess:
+ case <-time.After(150 * time.Millisecond):
+ t.Fatalf("portForwardFunc timed out waiting for idle timeout")
+ }
+ return nil
+ }
+
+ podNamespace := "other"
+ podName := "foo"
+
+ url := fw.testHTTPServer.URL + "/portForward/" + podNamespace + "/" + podName
+
+ upgradeRoundTripper := spdy.NewRoundTripper(nil)
+ c := &http.Client{Transport: upgradeRoundTripper}
+
+ resp, err := c.Get(url)
+ if err != nil {
+ t.Fatalf("Got error GETing: %v", err)
+ }
+ defer resp.Body.Close()
+
+ conn, err := upgradeRoundTripper.NewConnection(resp)
+ if err != nil {
+ t.Fatalf("Unexpected error creating streaming connection: %s", err)
+ }
+ if conn == nil {
+ t.Fatal("Unexpected nil connection")
+ }
+ defer conn.Close()
+
+ select {
+ case <-conn.CloseChan():
+ close(idleSuccess)
+ case <-time.After(150 * time.Millisecond):
+ t.Fatalf("Timed out waiting for connection closure due to idle timeout")
+ }
+}
+
+func TestServePortForward(t *testing.T) {
+ tests := []struct {
+ port string
+ uid bool
+ clientData string
+ containerData string
+ shouldError bool
+ }{
+ {port: "", shouldError: true},
+ {port: "abc", shouldError: true},
+ {port: "-1", shouldError: true},
+ {port: "65536", shouldError: true},
+ {port: "0", shouldError: true},
+ {port: "1", shouldError: false},
+ {port: "8000", shouldError: false},
+ {port: "8000", clientData: "client data", containerData: "container data", shouldError: false},
+ {port: "65535", shouldError: false},
+ {port: "65535", uid: true, shouldError: false},
+ }
+
+ podNamespace := "other"
+ podName := "foo"
+ expectedPodName := podName + "." + podNamespace + ".etcd"
+ expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647"
+
+ for i, test := range tests {
+ fw := newServerTest()
+
+ fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration {
+ return 0
+ }
+
+ portForwardFuncDone := make(chan struct{})
+
+ fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error {
+ defer close(portForwardFuncDone)
+
+ if e, a := expectedPodName, name; e != a {
+ t.Fatalf("%d: pod name: expected '%v', got '%v'", i, e, a)
+ }
+
+ if e, a := expectedUid, uid; test.uid && e != string(a) {
+ t.Fatalf("%d: uid: expected '%v', got '%v'", i, e, a)
+ }
+
+ p, err := strconv.ParseUint(test.port, 10, 16)
+ if err != nil {
+ t.Fatalf("%d: error parsing port string '%s': %v", i, test.port, err)
+ }
+ if e, a := uint16(p), port; e != a {
+ t.Fatalf("%d: port: expected '%v', got '%v'", i, e, a)
+ }
+
+ if test.clientData != "" {
+ fromClient := make([]byte, 32)
+ n, err := stream.Read(fromClient)
+ if err != nil {
+ t.Fatalf("%d: error reading client data: %v", i, err)
+ }
+ if e, a := test.clientData, string(fromClient[0:n]); e != a {
+ t.Fatalf("%d: client data: expected to receive '%v', got '%v'", i, e, a)
+ }
+ }
+
+ if test.containerData != "" {
+ _, err := stream.Write([]byte(test.containerData))
+ if err != nil {
+ t.Fatalf("%d: error writing container data: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
+ var url string
+ if test.uid {
+ url = fmt.Sprintf("%s/portForward/%s/%s/%s", fw.testHTTPServer.URL, podNamespace, podName, expectedUid)
+ } else {
+ url = fmt.Sprintf("%s/portForward/%s/%s", fw.testHTTPServer.URL, podNamespace, podName)
+ }
+
+ upgradeRoundTripper := spdy.NewRoundTripper(nil)
+ c := &http.Client{Transport: upgradeRoundTripper}
+
+ resp, err := c.Get(url)
+ if err != nil {
+ t.Fatalf("%d: Got error GETing: %v", i, err)
+ }
+ defer resp.Body.Close()
+
+ conn, err := upgradeRoundTripper.NewConnection(resp)
+ if err != nil {
t.Fatalf("Unexpected error creating streaming connection: %s", err) + } + if conn == nil { + t.Fatal("%d: Unexpected nil connection", i) + } + defer conn.Close() + + headers := http.Header{} + headers.Set("streamType", "error") + headers.Set("port", test.port) + errorStream, err := conn.CreateStream(headers) + _ = errorStream + haveErr := err != nil + if e, a := test.shouldError, haveErr; e != a { + t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) + } + + if test.shouldError { + continue + } + + headers.Set("streamType", "data") + headers.Set("port", test.port) + dataStream, err := conn.CreateStream(headers) + haveErr = err != nil + if e, a := test.shouldError, haveErr; e != a { + t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) + } + + if test.clientData != "" { + _, err := dataStream.Write([]byte(test.clientData)) + if err != nil { + t.Fatalf("%d: unexpected error writing client data: %v", i, err) + } + } + + if test.containerData != "" { + fromContainer := make([]byte, 32) + n, err := dataStream.Read(fromContainer) + if err != nil { + t.Fatalf("%d: unexpected error reading container data: %v", i, err) + } + if e, a := test.containerData, string(fromContainer[0:n]); e != a { + t.Fatalf("%d: expected to receive '%v' from container, got '%v'", i, e, a) + } + } + + select { + case <-portForwardFuncDone: + case <-time.After(100 * time.Millisecond): + t.Fatalf("%d: timed out waiting for portForwardFuncDone", i) + } + } +} diff --git a/pkg/util/httpstream/doc.go b/pkg/util/httpstream/doc.go new file mode 100644 index 00000000000..95105e1ccaa --- /dev/null +++ b/pkg/util/httpstream/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httpstream adds multiplexed streaming support to HTTP requests and +// responses via connection upgrades. +package httpstream diff --git a/pkg/util/httpstream/httpstream.go b/pkg/util/httpstream/httpstream.go new file mode 100644 index 00000000000..6568a13a7d9 --- /dev/null +++ b/pkg/util/httpstream/httpstream.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httpstream + +import ( + "io" + "net/http" + "time" +) + +const ( + HeaderConnection = "Connection" + HeaderUpgrade = "Upgrade" +) + +// NewStreamHandler defines a function that is called when a new Stream is +// received. If no error is returned, the Stream is accepted; otherwise, +// the stream is rejected. 
+type NewStreamHandler func(Stream) error + +// NoOpNewStreamHandler is a stream handler that accepts a new stream and +// performs no other logic. +func NoOpNewStreamHandler(stream Stream) error { return nil } + +// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade +// HTTP requests to support multiplexed bidirectional streams. After RoundTrip() +// is invoked, if the upgrade is successful, clients may retrieve the upgraded +// connection by calling UpgradeRoundTripper.Connection(). +type UpgradeRoundTripper interface { + http.RoundTripper + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (Connection, error) +} + +// ResponseUpgrader knows how to upgrade HTTP requests and responses to +// add streaming support to them. +type ResponseUpgrader interface { + // UpgradeResponse upgrades an HTTP response to one that supports multiplexed + // streams. newStreamHandler will be called synchronously whenever the + // other end of the upgraded connection creates a new stream. + UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection +} + +// Connection represents an upgraded HTTP connection. +type Connection interface { + // CreateStream creates a new Stream with the supplied headers. + CreateStream(headers http.Header) (Stream, error) + // Close resets all streams and closes the connection. + Close() error + // CloseChan returns a channel that is closed when the underlying connection is closed. + CloseChan() <-chan bool + // SetIdleTimeout sets the amount of time the connection may remain idle before + // it is automatically closed. + SetIdleTimeout(timeout time.Duration) +} + +// Stream represents a bidirectional communications channel that is part of an +// upgraded connection. +type Stream interface { + io.ReadWriteCloser + // Reset closes both directions of the stream, indicating that neither client + // or server can use it any more. + Reset() error + // Headers returns the headers used to create the stream. + Headers() http.Header +} diff --git a/pkg/util/httpstream/spdy/connection.go b/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 00000000000..46792427891 --- /dev/null +++ b/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,139 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/docker/spdystream" + "github.com/golang/glog" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams []httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler +} + +// NewClientConnection creates a new SPDY client connection. 
+func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + c := &connection{conn: conn, newStreamHandler: newStreamHandler} + go conn.Serve(c.newSpdyStream) + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 2 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. +func (c *connection) Close() error { + c.streamLock.Lock() + for _, s := range c.streams { + s.Reset() + } + c.streams = make([]httpstream.Stream, 0) + c.streamLock.Unlock() + + return c.conn.Close() +} + +// CreateStream creates a new stream with the specified headers and registers +// it with the connection. +func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { + stream, err := c.conn.CreateStream(headers, nil, false) + if err != nil { + return nil, err + } + if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil { + return nil, err + } + + c.registerStream(stream) + return stream, nil +} + +// registerStream adds the stream s to the connection's list of streams that +// it owns. +func (c *connection) registerStream(s httpstream.Stream) { + c.streamLock.Lock() + c.streams = append(c.streams, s) + c.streamLock.Unlock() +} + +// CloseChan returns a channel that, when closed, indicates that the underlying +// spdystream.Connection has been closed. +func (c *connection) CloseChan() <-chan bool { + return c.conn.CloseChan() +} + +// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve. +// It calls connection's newStreamHandler, giving it the opportunity to accept or reject +// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the +// stream is accepted and registered with the connection. +func (c *connection) newSpdyStream(stream *spdystream.Stream) { + err := c.newStreamHandler(stream) + rejectStream := (err != nil) + if rejectStream { + glog.Warningf("Stream rejected: %v", err) + stream.Reset() + return + } + + c.registerStream(stream) + stream.SendReply(http.Header{}, rejectStream) +} + +// SetIdleTimeout sets the amount of time the connection may remain idle before +// it is automatically closed. 
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+ c.conn.SetIdleTimeout(timeout)
+}
diff --git a/pkg/util/httpstream/spdy/roundtripper.go b/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 00000000000..acbcc879fe6
--- /dev/null
+++ b/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "bufio"
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+ //tlsConfig holds the TLS configuration settings to use when connecting
+ //to the remote server.
+ tlsConfig *tls.Config
+
+ /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+ must be safe for use by multiple concurrent goroutines. If this is absolutely
+ necessary, we could keep a map from http.Request to net.Conn. In practice,
+ a client will create an http.Client, set the transport to a new instance of
+ SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+ */
+ // conn is the underlying network connection to the remote server.
+ conn net.Conn
+}
+
+// NewRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config) httpstream.UpgradeRoundTripper {
+ return &SpdyRoundTripper{tlsConfig: tlsConfig}
+}
+
+// dial dials the host specified by req, using TLS if appropriate.
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
+ dialAddr := util.CanonicalAddr(req.URL)
+
+ if req.URL.Scheme == "http" {
+ return net.Dial("tcp", dialAddr)
+ }
+
+ // TODO validate the TLSClientConfig is set up?
+ conn, err := tls.Dial("tcp", dialAddr, s.tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ host, _, err := net.SplitHostPort(dialAddr)
+ if err != nil {
+ return nil, err
+ }
+ err = conn.VerifyHostname(host)
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// RoundTrip executes the Request and upgrades it. After a successful upgrade,
+// clients may call SpdyRoundTripper.NewConnection() to retrieve the upgraded
+// connection.
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ // TODO what's the best way to clone the request?
+ r := *req + req = &r + req.Header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + req.Header.Add(httpstream.HeaderUpgrade, HeaderSpdy31) + + conn, err := s.dial(req) + if err != nil { + return nil, err + } + + err = req.Write(conn) + if err != nil { + return nil, err + } + + resp, err := http.ReadResponse(bufio.NewReader(conn), req) + if err != nil { + return nil, err + } + + s.conn = conn + + return resp, nil +} + +// NewConnection validates the upgrade response, creating and returning a new +// httpstream.Connection if there were no errors. +func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + responseError := "" + responseErrorBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + responseError = "Unable to read error from server response" + } else { + responseError = string(responseErrorBytes) + } + + return nil, fmt.Errorf("Unable to upgrade connection: %s", responseError) + } + + return NewClientConnection(s.conn) +} diff --git a/pkg/util/httpstream/spdy/roundtripper_test.go b/pkg/util/httpstream/spdy/roundtripper_test.go new file mode 100644 index 00000000000..eff1e423212 --- /dev/null +++ b/pkg/util/httpstream/spdy/roundtripper_test.go @@ -0,0 +1,226 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spdy + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "io" + "math/big" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" +) + +func TestRoundTripAndNewConnection(t *testing.T) { + testCases := []struct { + serverConnectionHeader string + serverUpgradeHeader string + useTLS bool + shouldError bool + }{ + { + serverConnectionHeader: "", + serverUpgradeHeader: "", + shouldError: true, + }, + { + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "", + shouldError: true, + }, + { + serverConnectionHeader: "", + serverUpgradeHeader: "SPDY/3.1", + shouldError: true, + }, + { + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + shouldError: false, + }, + { + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + useTLS: true, + shouldError: false, + }, + } + + for i, testCase := range testCases { + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if testCase.shouldError { + if e, a := httpstream.HeaderUpgrade, req.Header.Get(httpstream.HeaderConnection); e != a { + t.Fatalf("%d: Expected connection=upgrade header, got '%s", i, a) + } + + w.Header().Set(httpstream.HeaderConnection, testCase.serverConnectionHeader) + w.Header().Set(httpstream.HeaderUpgrade, testCase.serverUpgradeHeader) + w.WriteHeader(http.StatusSwitchingProtocols) + + return + } + + streamCh := make(chan httpstream.Stream) + + responseUpgrader := NewResponseUpgrader() + spdyConn := responseUpgrader.UpgradeResponse(w, req, func(s httpstream.Stream) error { + streamCh <- s + return nil + }) + if spdyConn == nil { + t.Fatalf("%d: unexpected nil spdyConn", i) + } + defer spdyConn.Close() + + stream := <-streamCh + io.Copy(stream, stream) + })) + + clientTLS := &tls.Config{} + + if testCase.useTLS { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("%d: error generating keypair: %s", i, err) + } + + notBefore := time.Now() + notAfter := notBefore.Add(1 * time.Hour) + + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Localhost Co"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + host := "127.0.0.1" + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } + template.DNSNames = append(template.DNSNames, host) + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + if err != nil { + t.Fatalf("%d: error creating cert: %s", i, err) + } + + cert, err := x509.ParseCertificate(derBytes) + if err != nil { + t.Fatalf("%d: error parsing cert: %s", i, err) + } + + roots := x509.NewCertPool() + roots.AddCert(cert) + server.TLS = &tls.Config{ + RootCAs: roots, + } + clientTLS.RootCAs = roots + + certBuf := bytes.Buffer{} + err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + if err != nil { + t.Fatalf("%d: error encoding cert: %s", i, err) + } + + keyBuf := bytes.Buffer{} + err = pem.Encode(&keyBuf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}) + if err != nil { + t.Fatalf("%d: error encoding key: %s", 
i, err) + } + + tlsCert, err := tls.X509KeyPair(certBuf.Bytes(), keyBuf.Bytes()) + if err != nil { + t.Fatalf("%d: error calling tls.X509KeyPair: %s", i, err) + } + server.TLS.Certificates = []tls.Certificate{tlsCert} + clientTLS.Certificates = []tls.Certificate{tlsCert} + server.StartTLS() + } else { + server.Start() + } + defer server.Close() + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("%d: Error creating request: %s", i, err) + } + + spdyTransport := NewRoundTripper(clientTLS) + client := &http.Client{Transport: spdyTransport} + + resp, err := client.Do(req) + if err != nil { + t.Fatalf("%d: unexpected error from client.Do: %s", i, err) + } + + conn, err := spdyTransport.NewConnection(resp) + haveErr := err != nil + if e, a := testCase.shouldError, haveErr; e != a { + t.Fatalf("%d: shouldError=%t, got %t: %v", i, e, a, err) + } + if testCase.shouldError { + continue + } + defer conn.Close() + + if resp.StatusCode != http.StatusSwitchingProtocols { + t.Fatalf("%d: expected http 101 switching protocols, got %d", i, resp.StatusCode) + } + + stream, err := conn.CreateStream(http.Header{}) + if err != nil { + t.Fatalf("%d: error creating client stream: %s", i, err) + } + + n, err := stream.Write([]byte("hello")) + if err != nil { + t.Fatalf("%d: error writing to stream: %s", i, err) + } + if n != 5 { + t.Fatalf("%d: Expected to write 5 bytes, but actually wrote %d", i, n) + } + + b := make([]byte, 5) + n, err = stream.Read(b) + if err != nil { + t.Fatalf("%d: error reading from stream: %s", i, err) + } + if n != 5 { + t.Fatalf("%d: Expected to read 5 bytes, but actually read %d", i, n) + } + if e, a := "hello", string(b[0:n]); e != a { + t.Fatalf("%d: expected '%s', got '%s'", i, e, a) + } + } +} diff --git a/pkg/util/httpstream/spdy/upgrade.go b/pkg/util/httpstream/spdy/upgrade.go new file mode 100644 index 00000000000..54bb32d4071 --- /dev/null +++ b/pkg/util/httpstream/spdy/upgrade.go @@ -0,0 +1,78 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "fmt" + "net/http" + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" + "github.com/golang/glog" +) + +const HeaderSpdy31 = "SPDY/3.1" + +// responseUpgrader knows how to upgrade HTTP responses. It +// implements the httpstream.ResponseUpgrader interface. +type responseUpgrader struct { +} + +// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is +// capable of upgrading HTTP responses using SPDY/3.1 via the +// spdystream package. +func NewResponseUpgrader() httpstream.ResponseUpgrader { + return responseUpgrader{} +} + +// UpgradeResponse upgrades an HTTP response to one that supports multiplexed +// streams. newStreamHandler will be called synchronously whenever the +// other end of the upgraded connection creates a new stream. 
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + w.Write([]byte(fmt.Sprintf("Unable to upgrade: missing upgrade headers in request: %#v", req.Header))) + w.WriteHeader(http.StatusBadRequest) + return nil + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + w.Write([]byte("Unable to upgrade: unable to hijack response")) + w.WriteHeader(http.StatusInternalServerError) + return nil + } + + w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31) + w.WriteHeader(http.StatusSwitchingProtocols) + + conn, _, err := hijacker.Hijack() + if err != nil { + glog.Errorf("Unable to upgrade: error hijacking response: %v", err) + return nil + } + + spdyConn, err := NewServerConnection(conn, newStreamHandler) + if err != nil { + glog.Errorf("Unable to upgrade: error creating SPDY server connection: %v", err) + return nil + } + + return spdyConn +} diff --git a/pkg/util/httpstream/spdy/upgrade_test.go b/pkg/util/httpstream/spdy/upgrade_test.go new file mode 100644 index 00000000000..55bbe2ae1d0 --- /dev/null +++ b/pkg/util/httpstream/spdy/upgrade_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spdy + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestUpgradeResponse(t *testing.T) { + testCases := []struct { + connectionHeader string + upgradeHeader string + shouldError bool + }{ + { + connectionHeader: "", + upgradeHeader: "", + shouldError: true, + }, + { + connectionHeader: "Upgrade", + upgradeHeader: "", + shouldError: true, + }, + { + connectionHeader: "", + upgradeHeader: "SPDY/3.1", + shouldError: true, + }, + { + connectionHeader: "Upgrade", + upgradeHeader: "SPDY/3.1", + shouldError: false, + }, + } + + for i, testCase := range testCases { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + upgrader := NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, nil) + haveErr := conn == nil + if e, a := testCase.shouldError, haveErr; e != a { + t.Fatalf("%d: expected shouldErr=%t, got %t", i, testCase.shouldError, haveErr) + } + if haveErr { + return + } + if conn == nil { + t.Fatalf("%d: unexpected nil conn", i) + } + defer conn.Close() + })) + defer server.Close() + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("%d: error creating request: %s", i, err) + } + + req.Header.Set("Connection", testCase.connectionHeader) + req.Header.Set("Upgrade", testCase.upgradeHeader) + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("%d: unexpected non-nil err from client.Do: %s", i, err) + } + + if testCase.shouldError { + continue + } + + if resp.StatusCode != http.StatusSwitchingProtocols { + t.Fatalf("%d: expected status 101 switching protocols, got %d", i, resp.StatusCode) + } + } +} diff --git a/pkg/util/net.go b/pkg/util/net.go index f36b1cae3fd..b35274ceaad 100644 --- a/pkg/util/net.go +++ b/pkg/util/net.go @@ -19,6 +19,7 @@ package util import ( "fmt" "net" + "net/url" "strings" ) @@ -61,3 +62,24 @@ func (ipnet *IPNet) Set(value string) error { func (*IPNet) Type() string { return "ipNet" } + +// FROM: http://golang.org/src/net/http/client.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// FROM: http://golang.org/src/net/http/transport.go +// canonicalAddr returns url.Host but always with a ":port" suffix +func CanonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/test/e2e/pods.go b/test/e2e/pods.go index 325e7ff8fd3..464e7f77591 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -415,4 +415,178 @@ var _ = Describe("Pods", func() { }, }) }) + + // The following tests for remote command execution and port forwarding are + // commented out because the GCE environment does not currently have nsenter + // in the kubelet's PATH, nor does it have socat installed. Once we figure + // out the best way to have nsenter and socat available in GCE (and hopefully + // all providers), we can enable these tests. 
+ /* + It("should support remote command execution", func() { + clientConfig, err := loadConfig() + if err != nil { + Fail(fmt.Sprintf("Failed to create client config: %v", err)) + } + + podClient := c.Pods(api.NamespaceDefault) + + By("creating the pod") + name := "pod-exec-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "nginx", + Image: "dockerfile/nginx", + }, + }, + }, + } + + By("submitting the pod to kubernetes") + _, err = podClient.Create(pod) + if err != nil { + Fail(fmt.Sprintf("Failed to create pod: %v", err)) + } + defer func() { + // We call defer here in case there is a problem with + // the test so we can ensure that we clean up after + // ourselves + podClient.Delete(pod.Name) + }() + + By("waiting for the pod to start running") + expectNoError(waitForPodRunning(c, pod.Name, 300*time.Second)) + + By("verifying the pod is in kubernetes") + pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))) + if err != nil { + Fail(fmt.Sprintf("Failed to query for pods: %v", err)) + } + Expect(len(pods.Items)).To(Equal(1)) + + pod = &pods.Items[0] + By(fmt.Sprintf("executing command on host %s pod %s in container %s", + pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) + req := c.Get(). + Prefix("proxy"). + Resource("minions"). + Name(pod.Status.Host). + Suffix("exec", api.NamespaceDefault, pod.Name, pod.Spec.Containers[0].Name) + + out := &bytes.Buffer{} + e := remotecommand.New(req, clientConfig, []string{"whoami"}, nil, out, nil, false) + err = e.Execute() + if err != nil { + Fail(fmt.Sprintf("Failed to execute command on host %s pod %s in container %s: %v", + pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name, err)) + } + if e, a := "root\n", out.String(); e != a { + Fail(fmt.Sprintf("exec: whoami: expected '%s', got '%s'", e, a)) + } + }) + + It("should support port forwarding", func() { + clientConfig, err := loadConfig() + if err != nil { + Fail(fmt.Sprintf("Failed to create client config: %v", err)) + } + + podClient := c.Pods(api.NamespaceDefault) + + By("creating the pod") + name := "pod-portforward-" + string(util.NewUUID()) + value := strconv.Itoa(time.Now().Nanosecond()) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "nginx", + Image: "dockerfile/nginx", + Ports: []api.Port{{ContainerPort: 80}}, + }, + }, + }, + } + + By("submitting the pod to kubernetes") + _, err = podClient.Create(pod) + if err != nil { + Fail(fmt.Sprintf("Failed to create pod: %v", err)) + } + defer func() { + // We call defer here in case there is a problem with + // the test so we can ensure that we clean up after + // ourselves + podClient.Delete(pod.Name) + }() + + By("waiting for the pod to start running") + expectNoError(waitForPodRunning(c, pod.Name, 300*time.Second)) + + By("verifying the pod is in kubernetes") + pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))) + if err != nil { + Fail(fmt.Sprintf("Failed to query for pods: %v", err)) + } + Expect(len(pods.Items)).To(Equal(1)) + + pod = &pods.Items[0] + By(fmt.Sprintf("initiating port forwarding to host %s pod %s in container %s", + pod.Status.Host, pod.Name, 
pod.Spec.Containers[0].Name)) + + req := c.Get(). + Prefix("proxy"). + Resource("minions"). + Name(pod.Status.Host). + Suffix("portForward", api.NamespaceDefault, pod.Name) + + stopChan := make(chan struct{}) + pf, err := portforward.New(req, clientConfig, []string{"5678:80"}, stopChan) + if err != nil { + Fail(fmt.Sprintf("Error creating port forwarder: %s", err)) + } + + errorChan := make(chan error) + go func() { + errorChan <- pf.ForwardPorts() + }() + + // wait for listeners to start + <-pf.Ready + + resp, err := http.Get("http://localhost:5678/") + if err != nil { + Fail(fmt.Sprintf("Error with http get to localhost:5678: %s", err)) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + Fail(fmt.Sprintf("Error reading response body: %s", err)) + } + + titleRegex := regexp.MustCompile("(.+)") + matches := titleRegex.FindStringSubmatch(string(body)) + if len(matches) != 2 { + Fail("Unable to locate page title in response HTML") + } + if e, a := "Welcome to nginx on Debian!", matches[1]; e != a { + Fail(fmt.Sprintf(": expected '%s', got '%s'", e, a)) + } + }) + */ }) diff --git a/test/e2e/util.go b/test/e2e/util.go index 0cbb1d16803..96736ebafc4 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -119,8 +119,8 @@ func waitForPodSuccess(c *client.Client, podName string, contName string, tryFor return fmt.Errorf("Gave up waiting for pod %q status to be success or failure after %d seconds", podName, trySecs) } -func loadClient() (*client.Client, error) { - config := client.Config{ +func loadConfig() (*client.Config, error) { + config := &client.Config{ Host: testContext.host, } info, err := clientauth.LoadFromFile(testContext.authConfig) @@ -134,11 +134,16 @@ func loadClient() (*client.Client, error) { info.CertFile = filepath.Join(testContext.certDir, "kubecfg.crt") info.KeyFile = filepath.Join(testContext.certDir, "kubecfg.key") } - config, err = info.MergeWithConfig(config) + mergedConfig, err := info.MergeWithConfig(*config) + return &mergedConfig, err +} + +func loadClient() (*client.Client, error) { + config, err := loadConfig() if err != nil { return nil, fmt.Errorf("Error creating client: %v", err.Error()) } - c, err := client.New(&config) + c, err := client.New(config) if err != nil { return nil, fmt.Errorf("Error creating client: %v", err.Error()) }
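
For reference, here is a minimal, self-contained sketch of how the pieces added by this patch fit together: the client upgrades a request with spdy.NewRoundTripper, obtains an httpstream.Connection from the response, and multiplexes streams over it, while the server accepts the upgrade with spdy.NewResponseUpgrader. The echo handler and the use of httptest are illustrative assumptions, not part of the patch; in the real code the kubelet's /exec and /portForward handlers play the server role.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy"
)

func main() {
	// Server side: upgrade the response and echo every stream the client creates.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		upgrader := spdy.NewResponseUpgrader()
		conn := upgrader.UpgradeResponse(w, req, func(s httpstream.Stream) error {
			go io.Copy(s, s) // accept the stream and echo it back
			return nil
		})
		if conn == nil {
			return // upgrade failed; UpgradeResponse already wrote an error
		}
		defer conn.Close()
		<-conn.CloseChan() // hold the hijacked connection until the client closes it
	}))
	defer server.Close()

	// Client side: send the upgrade request, then multiplex streams over the
	// single upgraded connection.
	rt := spdy.NewRoundTripper(nil)
	client := &http.Client{Transport: rt}
	resp, err := client.Get(server.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	conn, err := rt.NewConnection(resp)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	stream, err := conn.CreateStream(http.Header{})
	if err != nil {
		panic(err)
	}
	defer stream.Reset()

	stream.Write([]byte("hello"))
	b := make([]byte, 5)
	n, _ := stream.Read(b)
	fmt.Println(string(b[:n])) // prints "hello", echoed by the server
}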