Switch to glog for logging, bridge logging to glog.
1) Imported glog to third_party (previous commit).
2) Added support for third_party/update.sh to update just one pkg.
3) Search-and-replace:
   s/log.Printf/glog.Infof/
   s/log.Print/glog.Info/
   s/log.Fatalf/glog.Fatalf/
   s/log.Fatal/glog.Fatal/
4) Converted glog.Info.*, err into glog.Error*.

Adds some util interfaces to logging and calls them from each cmd, which set the default log output to write to glog. Passes glog-wrapped Loggers to etcd for logging. Log files will go to /tmp; we should probably follow this up with a default log dir for each cmd. The glog lib is sort of weak in that it only flushes every 30 seconds, so we spin up our own flushing goroutine.
parent 381ac4328c
commit 9f9e75f508
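
For orientation, the wiring that each command gains in this change looks roughly like the sketch below. It only uses names that appear in the hunks that follow (util.InitLogs, util.FlushLogs, util.NewLogger, etcd.SetLogger); the surrounding main function and the log message are illustrative, not copied from any single file.

package main

import (
    "flag"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/coreos/go-etcd/etcd"
    "github.com/golang/glog"
)

func main() {
    flag.Parse()
    // Route the standard library's log output through glog and start the
    // periodic flusher (glog's own 30s flush interval is too slow).
    util.InitLogs()
    defer util.FlushLogs()

    // The etcd client logs through a *log.Logger; hand it one backed by glog.
    etcd.SetLogger(util.NewLogger("etcd "))

    glog.Infof("component started")
}

The per-file hunks follow.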
@@ -20,13 +20,13 @@ package main

import (
"flag"
"log"
"net"
"strconv"

"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

var (
@@ -44,9 +44,11 @@ func init() {

func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()

if len(machineList) == 0 {
log.Fatal("No machines specified!")
glog.Fatal("No machines specified!")
}

var cloud cloudprovider.Interface
@@ -55,13 +57,13 @@ func main() {
var err error
cloud, err = cloudprovider.NewGCECloud()
if err != nil {
log.Fatal("Couldn't connect to GCE cloud: %#v", err)
glog.Fatal("Couldn't connect to GCE cloud: %#v", err)
}
default:
if len(*cloudProvider) > 0 {
log.Printf("Unknown cloud provider: %s", *cloudProvider)
glog.Infof("Unknown cloud provider: %s", *cloudProvider)
} else {
log.Print("No cloud provider specified.")
glog.Info("No cloud provider specified.")
}
}

@@ -72,5 +74,5 @@ func main() {
m = master.NewMemoryServer(machineList, cloud)
}

log.Fatal(m.Run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix))
glog.Fatal(m.Run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix))
}

@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"strconv"
@@ -29,6 +28,8 @@

kube_client "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudcfg"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

const APP_VERSION = "0.1"
@@ -66,21 +67,21 @@ func usage() {
flag.PrintDefaults()
}

// Reads & parses config file. On error, calls log.Fatal().
// Reads & parses config file. On error, calls glog.Fatal().
func readConfig(storage string) []byte {
if len(*config) == 0 {
log.Fatal("Need config file (-c)")
glog.Fatal("Need config file (-c)")
}
data, err := ioutil.ReadFile(*config)
if err != nil {
log.Fatalf("Unable to read %v: %#v\n", *config, err)
glog.Fatalf("Unable to read %v: %#v\n", *config, err)
}
data, err = cloudcfg.ToWireFormat(data, storage)
if err != nil {
log.Fatalf("Error parsing %v as an object for %v: %#v\n", *config, storage, err)
glog.Fatalf("Error parsing %v as an object for %v: %#v\n", *config, storage, err)
}
if *verbose {
log.Printf("Parsed config file successfully; sending:\n%v\n", string(data))
glog.Infof("Parsed config file successfully; sending:\n%v\n", string(data))
}
return data
}
@@ -92,6 +93,8 @@ func main() {
}

flag.Parse() // Scan the arguments list
util.InitLogs()
defer util.FlushLogs()

if *versionFlag {
fmt.Println("Version:", APP_VERSION)
@@ -101,7 +104,7 @@ func main() {
secure := true
parsedUrl, err := url.Parse(*httpServer)
if err != nil {
log.Fatalf("Unable to parse %v as a URL\n", err)
glog.Fatalf("Unable to parse %v as a URL\n", err)
}
if parsedUrl.Scheme != "" && parsedUrl.Scheme != "https" {
secure = false
@@ -111,14 +114,14 @@ func main() {
if secure {
auth, err = cloudcfg.LoadAuthInfo(*authConfig)
if err != nil {
log.Fatalf("Error loading auth: %#v", err)
glog.Fatalf("Error loading auth: %#v", err)
}
}

if *proxy {
log.Println("Starting to serve on localhost:8001")
glog.Info("Starting to serve on localhost:8001")
server := cloudcfg.NewProxyServer(*www, *httpServer, auth)
log.Fatal(server.Serve())
glog.Fatal(server.Serve())
}

if len(flag.Args()) < 1 {
@@ -129,7 +132,7 @@ func main() {

matchFound := executeAPIRequest(method, auth) || executeControllerRequest(method, auth)
if matchFound == false {
log.Fatalf("Unknown command %s", method)
glog.Fatalf("Unknown command %s", method)
}
}
@@ -137,7 +140,7 @@ func main() {
func executeAPIRequest(method string, auth *kube_client.AuthInfo) bool {
parseStorage := func() string {
if len(flag.Args()) != 2 {
log.Fatal("usage: cloudcfg [OPTIONS] get|list|create|update|delete <url>")
glog.Fatal("usage: cloudcfg [OPTIONS] get|list|create|update|delete <url>")
}
return strings.Trim(flag.Arg(1), "/")
}
@@ -165,7 +168,7 @@ func executeAPIRequest(method string, auth *kube_client.AuthInfo) bool {
}
obj, err := r.Do().Get()
if err != nil {
log.Fatalf("Got request error: %v\n", err)
glog.Fatalf("Got request error: %v\n", err)
return false
}

@@ -179,7 +182,7 @@ func executeAPIRequest(method string, auth *kube_client.AuthInfo) bool {
}

if err = printer.PrintObj(obj, os.Stdout); err != nil {
log.Fatalf("Failed to print: %#v\nRaw received object:\n%#v\n", err, obj)
glog.Fatalf("Failed to print: %#v\nRaw received object:\n%#v\n", err, obj)
}
fmt.Print("\n")
@@ -190,7 +193,7 @@ func executeAPIRequest(method string, auth *kube_client.AuthInfo) bool {
func executeControllerRequest(method string, auth *kube_client.AuthInfo) bool {
parseController := func() string {
if len(flag.Args()) != 2 {
log.Fatal("usage: cloudcfg [OPTIONS] stop|rm|rollingupdate <controller>")
glog.Fatal("usage: cloudcfg [OPTIONS] stop|rm|rollingupdate <controller>")
}
return flag.Arg(1)
}
@@ -207,31 +210,31 @@ func executeControllerRequest(method string, auth *kube_client.AuthInfo) bool {
err = cloudcfg.Update(parseController(), c, *updatePeriod)
case "run":
if len(flag.Args()) != 4 {
log.Fatal("usage: cloudcfg [OPTIONS] run <image> <replicas> <controller>")
glog.Fatal("usage: cloudcfg [OPTIONS] run <image> <replicas> <controller>")
}
image := flag.Arg(1)
replicas, err := strconv.Atoi(flag.Arg(2))
name := flag.Arg(3)
if err != nil {
log.Fatalf("Error parsing replicas: %#v", err)
glog.Fatalf("Error parsing replicas: %#v", err)
}
err = cloudcfg.RunController(image, name, replicas, c, *portSpec, *servicePort)
case "resize":
args := flag.Args()
if len(args) < 3 {
log.Fatal("usage: cloudcfg resize <controller> <replicas>")
glog.Fatal("usage: cloudcfg resize <controller> <replicas>")
}
name := args[1]
replicas, err := strconv.Atoi(args[2])
if err != nil {
log.Fatalf("Error parsing replicas: %#v", err)
glog.Fatalf("Error parsing replicas: %#v", err)
}
err = cloudcfg.ResizeController(name, replicas, c)
default:
return false
}
if err != nil {
log.Fatalf("Error: %#v", err)
glog.Fatalf("Error: %#v", err)
}
return true
}

@@ -23,13 +23,13 @@ package main

import (
"flag"
"log"
"os"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
)

var (
@@ -39,13 +39,15 @@ var (

func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()

if len(*etcd_servers) == 0 || len(*master) == 0 {
log.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
glog.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
}

// Set up logger for etcd client
etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
etcd.SetLogger(util.NewLogger("etcd "))

controllerManager := controller.MakeReplicationManager(
etcd.NewClient([]string{*etcd_servers}),

@@ -21,7 +21,6 @@ package main
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"time"
@@ -31,14 +30,19 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
)

func main() {
util.InitLogs()
defer util.FlushLogs()

manifestUrl := ServeCachedManifestFile()
// Setup
servers := []string{"http://localhost:4001"}
log.Printf("Creating etcd client pointing to %v", servers)
glog.Infof("Creating etcd client pointing to %v", servers)
machineList := []string{"localhost", "machine"}

// Master
@@ -75,22 +79,22 @@ func main() {
go otherKubelet.RunKubelet("", "", servers[0], "localhost", 0)

// Ok. we're good to go.
log.Printf("API Server started on %s", apiserver.URL)
glog.Infof("API Server started on %s", apiserver.URL)
// Wait for the synchronization threads to come up.
time.Sleep(time.Second * 10)

kubeClient := client.New(apiserver.URL, nil)
data, err := ioutil.ReadFile("api/examples/controller.json")
if err != nil {
log.Fatalf("Unexpected error: %#v", err)
glog.Fatalf("Unexpected error: %#v", err)
}
var controllerRequest api.ReplicationController
if err = json.Unmarshal(data, &controllerRequest); err != nil {
log.Fatalf("Unexpected error: %#v", err)
glog.Fatalf("Unexpected error: %#v", err)
}

if _, err = kubeClient.CreateReplicationController(controllerRequest); err != nil {
log.Fatalf("Unexpected error: %#v", err)
glog.Fatalf("Unexpected error: %#v", err)
}
// Give the controllers some time to actually create the pods
time.Sleep(time.Second * 10)
@@ -98,7 +102,7 @@ func main() {
// Validate that they're truly up.
pods, err := kubeClient.ListPods(nil)
if err != nil || len(pods.Items) != 2 {
log.Fatal("FAILED")
glog.Fatal("FAILED")
}

// Check that kubelet tried to make the pods.
@@ -120,9 +124,9 @@ func main() {
// We expect 5: 2 net containers + 2 pods from the replication controller +
// 1 net container + 2 pods from the URL.
if len(createdPods) != 7 {
log.Fatalf("Unexpected list of created pods: %#v\n", createdPods)
glog.Fatalf("Unexpected list of created pods: %#v\n", createdPods)
}
log.Printf("OK")
glog.Infof("OK")
}

// Serve a file for kubelet to read.
@@ -132,7 +136,7 @@ func ServeCachedManifestFile() (servingAddress string) {
w.Write([]byte(testManifestFile))
return
}
log.Fatalf("Got request: %#v\n", r)
glog.Fatalf("Got request: %#v\n", r)
http.NotFound(w, r)
}))
return server.URL + "/manifest"

@@ -22,15 +22,15 @@ package main

import (
"flag"
"log"
"math/rand"
"os"
"os/exec"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
)

var (
@@ -49,15 +49,17 @@ const dockerBinary = "/usr/bin/docker"

func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())

// Set up logger for etcd client
etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
etcd.SetLogger(util.NewLogger("etcd "))

endpoint := "unix:///var/run/docker.sock"
dockerClient, err := docker.NewClient(endpoint)
if err != nil {
log.Fatal("Couldn't connnect to docker.")
glog.Fatal("Couldn't connnect to docker.")
}

hostname := []byte(*hostnameOverride)
@@ -66,7 +68,7 @@ func main() {
// want the FQDN, and this is the easiest way to get it.
hostname, err = exec.Command("hostname", "-f").Output()
if err != nil {
log.Fatalf("Couldn't determine hostname: %v", err)
glog.Fatalf("Couldn't determine hostname: %v", err)
}
}

@@ -22,10 +22,8 @@ package main
import (
"flag"
"fmt"
"log"
"math/rand"
"net"
"os"
"strconv"
"time"

@@ -33,8 +31,10 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
)

// kubelet flags
@@ -65,7 +65,7 @@ func fakeKubelet() {
endpoint := "unix:///var/run/docker.sock"
dockerClient, err := docker.NewClient(endpoint)
if err != nil {
log.Fatal("Couldn't connnect to docker.")
glog.Fatal("Couldn't connnect to docker.")
}

myKubelet := kubelet.Kubelet{
@@ -81,7 +81,7 @@ func fakeKubelet() {
// Starts api services (the master). Never returns.
func apiServer() {
m := master.New([]string{*etcdServer}, []string{*kubeletAddress}, nil)
log.Fatal(m.Run(net.JoinHostPort(*masterAddress, strconv.Itoa(int(*masterPort))), *apiPrefix))
glog.Fatal(m.Run(net.JoinHostPort(*masterAddress, strconv.Itoa(int(*masterPort))), *apiPrefix))
}

// Starts up a controller manager. Never returns.
@@ -96,16 +96,18 @@ func controllerManager() {

func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())

// Set up logger for etcd client
etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
etcd.SetLogger(util.NewLogger("etcd "))

go apiServer()
go fakeKubelet()
go controllerManager()

log.Printf("All components started.\nMaster running at: http://%s:%d\nKubelet running at: http://%s:%d\n",
glog.Infof("All components started.\nMaster running at: http://%s:%d\nKubelet running at: http://%s:%d\n",
*masterAddress, *masterPort,
*kubeletAddress, *kubeletPort)
select {}

@@ -18,12 +18,12 @@ package main

import (
"flag"
"log"
"os"

"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy"
"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
)

var (
@@ -33,11 +33,13 @@ var (

func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()

// Set up logger for etcd client
etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
etcd.SetLogger(util.NewLogger("etcd "))

log.Printf("Using configuration file %s and etcd_servers %s", *config_file, *etcd_servers)
glog.Infof("Using configuration file %s and etcd_servers %s", *config_file, *etcd_servers)

proxyConfig := config.NewServiceConfig()

@@ -19,7 +19,6 @@ package apiserver
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"runtime/debug"
@@ -29,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

// RESTStorage is a generic interface for RESTful storage services
@@ -84,7 +84,7 @@ func (server *ApiServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if x := recover(); x != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprint(w, "apiserver panic. Look in log for details.")
log.Printf("ApiServer panic'd on %v %v: %#v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
glog.Infof("ApiServer panic'd on %v %v: %#v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
}
}()
logger := MakeLogged(req, w)
@@ -172,7 +172,7 @@ func (server *ApiServer) handleREST(parts []string, requestUrl *url.URL, req *ht
sync := requestUrl.Query().Get("sync") == "true"
timeout, err := time.ParseDuration(requestUrl.Query().Get("timeout"))
if err != nil && len(requestUrl.Query().Get("timeout")) > 0 {
log.Printf("Failed to parse: %#v '%s'", err, requestUrl.Query().Get("timeout"))
glog.Errorf("Failed to parse: %#v '%s'", err, requestUrl.Query().Get("timeout"))
timeout = time.Second * 30
}
switch req.Method {

@@ -18,10 +18,11 @@ package apiserver

import (
"fmt"
"log"
"net/http"
"runtime"
"time"

"github.com/golang/glog"
)

// Add a layer on top of ResponseWriter, so we can track latency and error
@@ -56,7 +57,7 @@ func (rl *respLogger) Addf(format string, data ...interface{}) {
// Log is intended to be called once at the end of your request handler, via defer
func (rl *respLogger) Log() {
latency := time.Since(rl.startTime)
log.Printf("%s %s: (%v) %v%v%v", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo)
glog.Infof("%s %s: (%v) %v%v%v", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo)
}

// Implement http.ResponseWriter

@@ -21,11 +21,11 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
)

// ClientInterface holds the methods for clients of Kubenetes, an interface to allow mock testing
@@ -135,7 +135,7 @@ func (c *Client) rawRequest(method, path string, requestBody io.Reader, target i
err = api.DecodeInto(body, target)
}
if err != nil {
log.Printf("Failed to parse: %s\n", string(body))
glog.Infof("Failed to parse: %s\n", string(body))
// FIXME: no need to return err here?
}
return body, err

@@ -20,7 +20,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
@@ -29,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
"gopkg.in/v1/yaml"
)

@@ -134,17 +134,17 @@ func makePorts(spec string) []api.Port {
for _, part := range parts {
pieces := strings.Split(part, ":")
if len(pieces) != 2 {
log.Printf("Bad port spec: %s", part)
glog.Infof("Bad port spec: %s", part)
continue
}
host, err := strconv.Atoi(pieces[0])
if err != nil {
log.Printf("Host part is not integer: %s %v", pieces[0], err)
glog.Errorf("Host part is not integer: %s %v", pieces[0], err)
continue
}
container, err := strconv.Atoi(pieces[1])
if err != nil {
log.Printf("Container part is not integer: %s %v", pieces[1], err)
glog.Errorf("Container part is not integer: %s %v", pieces[1], err)
continue
}
result = append(result, api.Port{ContainerPort: container, HostPort: host})

@@ -19,7 +19,6 @@ package controller
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"strings"
"time"
@@ -29,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
)

// ReplicationManager is responsible for synchronizing ReplicationController objects stored in etcd
@@ -69,7 +69,7 @@ func (r RealPodControl) createReplica(controllerSpec api.ReplicationController)
}
_, err := r.kubeClient.CreatePod(pod)
if err != nil {
log.Printf("%#v\n", err)
glog.Errorf("%#v\n", err)
}
}

@@ -111,7 +111,7 @@ func (rm *ReplicationManager) watchControllers() {
}()
_, err := rm.etcdClient.Watch("/registry/controllers", 0, true, watchChannel, stop)
if err != etcd.ErrWatchStoppedByUser {
log.Printf("etcd.Watch stopped unexpectedly: %v (%#v)", err, err)
glog.Errorf("etcd.Watch stopped unexpectedly: %v (%#v)", err, err)
}
}()

@@ -126,10 +126,10 @@ func (rm *ReplicationManager) watchControllers() {
// that called us call us again.
return
}
log.Printf("Got watch: %#v", watchResponse)
glog.Infof("Got watch: %#v", watchResponse)
controller, err := rm.handleWatchResponse(watchResponse)
if err != nil {
log.Printf("Error handling data: %#v, %#v", err, watchResponse)
glog.Errorf("Error handling data: %#v, %#v", err, watchResponse)
continue
}
rm.syncHandler(*controller)
@@ -185,15 +185,15 @@ func (rm *ReplicationManager) syncReplicationController(controllerSpec api.Repli
}
filteredList := rm.filterActivePods(podList.Items)
diff := len(filteredList) - controllerSpec.DesiredState.Replicas
log.Printf("%#v", filteredList)
glog.Infof("%#v", filteredList)
if diff < 0 {
diff *= -1
log.Printf("Too few replicas, creating %d\n", diff)
glog.Infof("Too few replicas, creating %d\n", diff)
for i := 0; i < diff; i++ {
rm.podControl.createReplica(controllerSpec)
}
} else if diff > 0 {
log.Print("Too many replicas, deleting")
glog.Info("Too many replicas, deleting")
for i := 0; i < diff; i++ {
rm.podControl.deletePod(filteredList[i].ID)
}
@@ -206,13 +206,13 @@ func (rm *ReplicationManager) synchronize() {
helper := util.EtcdHelper{rm.etcdClient}
err := helper.ExtractList("/registry/controllers", &controllerSpecs)
if err != nil {
log.Printf("Synchronization error: %v (%#v)", err, err)
glog.Errorf("Synchronization error: %v (%#v)", err, err)
return
}
for _, controllerSpec := range controllerSpecs {
err = rm.syncHandler(controllerSpec)
if err != nil {
log.Printf("Error synchronizing: %#v", err)
glog.Errorf("Error synchronizing: %#v", err)
}
}
}

@@ -20,7 +20,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
@@ -36,6 +35,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/google/cadvisor/info"
"gopkg.in/v1/yaml"
)
@@ -102,27 +102,27 @@ func (kl *Kubelet) RunKubelet(config_path, manifest_url, etcd_servers, address s
}
updateChannel := make(chan manifestUpdate)
if config_path != "" {
log.Printf("Watching for file configs at %s", config_path)
glog.Infof("Watching for file configs at %s", config_path)
go util.Forever(func() {
kl.WatchFiles(config_path, updateChannel)
}, kl.FileCheckFrequency)
}
if manifest_url != "" {
log.Printf("Watching for HTTP configs at %s", manifest_url)
glog.Infof("Watching for HTTP configs at %s", manifest_url)
go util.Forever(func() {
if err := kl.extractFromHTTP(manifest_url, updateChannel); err != nil {
log.Printf("Error syncing http: %#v", err)
glog.Errorf("Error syncing http: %#v", err)
}
}, kl.HTTPCheckFrequency)
}
if etcd_servers != "" {
servers := []string{etcd_servers}
log.Printf("Watching for etcd configs at %v", servers)
glog.Infof("Watching for etcd configs at %v", servers)
kl.EtcdClient = etcd.NewClient(servers)
go util.Forever(func() { kl.SyncAndSetupEtcdWatch(updateChannel) }, 20*time.Second)
}
if address != "" {
log.Printf("Starting to listen on %s:%d", address, port)
glog.Infof("Starting to listen on %s:%d", address, port)
handler := KubeletServer{
Kubelet: kl,
UpdateChannel: updateChannel,
@@ -160,9 +160,9 @@ func (kl *Kubelet) LogEvent(event *api.Event) error {
response, err = kl.EtcdClient.AddChild(fmt.Sprintf("/events/%s", event.Container.Name), string(data), 60*60*48 /* 2 days */)
// TODO(bburns) : examine response here.
if err != nil {
log.Printf("Error writing event: %s\n", err)
glog.Errorf("Error writing event: %s\n", err)
if response != nil {
log.Printf("Response was: %#v\n", *response)
glog.Infof("Response was: %#v\n", *response)
}
}
return err
@@ -330,7 +330,7 @@ func makePortsAndBindings(container *api.Container) (map[docker.Port]struct{}, m
protocol = "/tcp"
default:
if len(port.Protocol) != 0 {
log.Printf("Unknown protocol: %s, defaulting to tcp.", port.Protocol)
glog.Infof("Unknown protocol: %s, defaulting to tcp.", port.Protocol)
}
protocol = "/tcp"
}
@@ -381,7 +381,7 @@ func (kl *Kubelet) KillContainer(name string) error {
}
if !found {
// This is weird, but not an error, so yell and then return nil
log.Printf("Couldn't find container: %s", name)
glog.Infof("Couldn't find container: %s", name)
return nil
}
err = kl.DockerClient.StopContainer(id, 10)
@@ -411,7 +411,7 @@ func (kl *Kubelet) extractFromFile(name string) (api.ContainerManifest, error) {

data, err := ioutil.ReadAll(file)
if err != nil {
log.Printf("Couldn't read from file: %v", err)
glog.Errorf("Couldn't read from file: %v", err)
return manifest, err
}
if err = kl.ExtractYAMLData(data, &manifest); err != nil {
@@ -433,7 +433,7 @@ func (kl *Kubelet) extractFromDir(name string) ([]api.ContainerManifest, error)
for _, file := range files {
manifest, err := kl.extractFromFile(file)
if err != nil {
log.Printf("Couldn't read from file %s: %v", file, err)
glog.Errorf("Couldn't read from file %s: %v", file, err)
return manifests, err
}
manifests = append(manifests, manifest)
@@ -449,26 +449,26 @@ func (kl *Kubelet) WatchFiles(config_path string, updateChannel chan<- manifestU
statInfo, err := os.Stat(config_path)
if err != nil {
if !os.IsNotExist(err) {
log.Printf("Error accessing path: %#v", err)
glog.Errorf("Error accessing path: %#v", err)
}
return
}
if statInfo.Mode().IsDir() {
manifests, err := kl.extractFromDir(config_path)
if err != nil {
log.Printf("Error polling dir: %#v", err)
glog.Errorf("Error polling dir: %#v", err)
return
}
updateChannel <- manifestUpdate{fileSource, manifests}
} else if statInfo.Mode().IsRegular() {
manifest, err := kl.extractFromFile(config_path)
if err != nil {
log.Printf("Error polling file: %#v", err)
glog.Errorf("Error polling file: %#v", err)
return
}
updateChannel <- manifestUpdate{fileSource, []api.ContainerManifest{manifest}}
} else {
log.Printf("Error accessing config - not a directory or file")
glog.Errorf("Error accessing config - not a directory or file")
return
}
}
@@ -544,15 +544,15 @@ func (kl *Kubelet) getKubeletStateFromEtcd(key string, updateChannel chan<- mani
if util.IsEtcdNotFound(err) {
return nil
}
log.Printf("Error on etcd get of %s: %#v", key, err)
glog.Errorf("Error on etcd get of %s: %#v", key, err)
return err
}
manifests, err := kl.ResponseToManifests(response)
if err != nil {
log.Printf("Error parsing response (%#v): %s", response, err)
glog.Errorf("Error parsing response (%#v): %s", response, err)
return err
}
log.Printf("Got state from etcd: %+v", manifests)
glog.Infof("Got state from etcd: %+v", manifests)
updateChannel <- manifestUpdate{etcdSource, manifests}
return nil
}
@@ -583,7 +583,7 @@ func (kl *Kubelet) SyncAndSetupEtcdWatch(updateChannel chan<- manifestUpdate) {
go kl.WatchEtcd(watchChannel, updateChannel)

kl.getKubeletStateFromEtcd(key, updateChannel)
log.Printf("Setting up a watch for configuration changes in etcd for %s", key)
glog.Infof("Setting up a watch for configuration changes in etcd for %s", key)
kl.EtcdClient.Watch(key, 0, true, watchChannel, done)
}
}
@@ -600,7 +600,7 @@ func (kl *Kubelet) TimeoutWatch(done chan bool) {
func (kl *Kubelet) ExtractYAMLData(buf []byte, output interface{}) error {
err := yaml.Unmarshal(buf, output)
if err != nil {
log.Printf("Couldn't unmarshal configuration: %v", err)
glog.Errorf("Couldn't unmarshal configuration: %v", err)
return err
}
return nil
@@ -625,13 +625,13 @@ func (kl *Kubelet) WatchEtcd(watchChannel <-chan *etcd.Response, updateChannel c
if watchResponse == nil {
return
}
log.Printf("Got etcd change: %#v", watchResponse)
glog.Infof("Got etcd change: %#v", watchResponse)
manifests, err := kl.extractFromEtcd(watchResponse)
if err != nil {
log.Printf("Error handling response from etcd: %#v", err)
glog.Errorf("Error handling response from etcd: %#v", err)
continue
}
log.Printf("manifests: %#v", manifests)
glog.Infof("manifests: %#v", manifests)
// Ok, we have a valid configuration, send to channel for
// rejiggering.
updateChannel <- manifestUpdate{etcdSource, manifests}
@@ -672,20 +672,20 @@ func (kl *Kubelet) createNetworkContainer(manifest *api.ContainerManifest) (stri

// Sync the configured list of containers (desired state) with the host current state
func (kl *Kubelet) SyncManifests(config []api.ContainerManifest) error {
log.Printf("Desired: %#v", config)
glog.Infof("Desired: %#v", config)
var err error
desired := map[string]bool{}
for _, manifest := range config {
netName, exists, err := kl.networkContainerExists(&manifest)
if err != nil {
log.Printf("Failed to introspect network container. (%#v) Skipping container %s", err, manifest.Id)
glog.Errorf("Failed to introspect network container. (%#v) Skipping container %s", err, manifest.Id)
continue
}
if !exists {
log.Printf("Network container doesn't exist, creating")
glog.Infof("Network container doesn't exist, creating")
netName, err = kl.createNetworkContainer(&manifest)
if err != nil {
log.Printf("Failed to create network container: %#v", err)
glog.Errorf("Failed to create network container: %#v", err)
}
// Docker list prefixes '/' for some reason, so let's do that...
netName = "/" + netName
@@ -695,14 +695,14 @@ func (kl *Kubelet) SyncManifests(config []api.ContainerManifest) error {
var exists bool
exists, actualName, err := kl.ContainerExists(&manifest, &element)
if err != nil {
log.Printf("Error detecting container: %#v skipping.", err)
glog.Errorf("Error detecting container: %#v skipping.", err)
continue
}
if !exists {
log.Printf("%#v doesn't exist, creating", element)
glog.Infof("%#v doesn't exist, creating", element)
kl.DockerPuller.Pull(element.Image)
if err != nil {
log.Printf("Error pulling container: %#v", err)
glog.Errorf("Error pulling container: %#v", err)
continue
}
// netName has the '/' prefix, so slice it off
@@ -713,18 +713,18 @@ func (kl *Kubelet) SyncManifests(config []api.ContainerManifest) error {

if err != nil {
// TODO(bburns) : Perhaps blacklist a container after N failures?
log.Printf("Error creating container: %#v", err)
glog.Errorf("Error creating container: %#v", err)
desired[actualName] = true
continue
}
} else {
log.Printf("%#v exists as %v", element.Name, actualName)
glog.Infof("%#v exists as %v", element.Name, actualName)
}
desired[actualName] = true
}
}
existingContainers, _ := kl.ListContainers()
log.Printf("Existing: %#v Desired: %#v", existingContainers, desired)
glog.Infof("Existing: %#v Desired: %#v", existingContainers, desired)
for _, container := range existingContainers {
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
@@ -732,10 +732,10 @@ func (kl *Kubelet) SyncManifests(config []api.ContainerManifest) error {
continue
}
if !desired[container] {
log.Printf("Killing: %s", container)
glog.Infof("Killing: %s", container)
err = kl.KillContainer(container)
if err != nil {
log.Printf("Error killing container: %#v", err)
glog.Errorf("Error killing container: %#v", err)
}
}
}
@@ -753,7 +753,7 @@ func (kl *Kubelet) RunSyncLoop(updateChannel <-chan manifestUpdate, handler Sync
for {
select {
case u := <-updateChannel:
log.Printf("Got configuration from %s: %#v", u.source, u.manifests)
glog.Infof("Got configuration from %s: %#v", u.source, u.manifests)
last[u.source] = u.manifests
case <-time.After(kl.SyncFrequency):
}
@@ -765,7 +765,7 @@ func (kl *Kubelet) RunSyncLoop(updateChannel <-chan manifestUpdate, handler Sync

err := handler.SyncManifests(manifests)
if err != nil {
log.Printf("Couldn't sync containers : %#v", err)
glog.Errorf("Couldn't sync containers : %#v", err)
}
}
}

@@ -17,7 +17,6 @@ limitations under the License.
package master

import (
"log"
"sync"
"time"

@@ -25,6 +24,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

// PodCache contains both a cache of container information, as well as the mechanism for keeping
@@ -74,13 +74,13 @@ func (p *PodCache) updateContainerInfo(host, id string) error {
func (p *PodCache) UpdateAllContainers() {
pods, err := p.pods.ListPods(labels.Everything())
if err != nil {
log.Printf("Error synchronizing container: %#v", err)
glog.Errorf("Error synchronizing container: %#v", err)
return
}
for _, pod := range pods {
err := p.updateContainerInfo(pod.CurrentState.Host, pod.ID)
if err != nil {
log.Printf("Error synchronizing container: %#v", err)
glog.Errorf("Error synchronizing container: %#v", err)
}
}
}

@@ -17,11 +17,11 @@ limitations under the License.
package config

import (
"log"
"sync"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/golang/glog"
)

type Operation int
@@ -110,14 +110,14 @@ func NewServiceConfig() ServiceConfig {
}

func (impl *ServiceConfig) Run() {
log.Printf("Starting the config Run loop")
glog.Infof("Starting the config Run loop")
for {
select {
case source := <-impl.serviceNotifyChannel:
log.Printf("Got new service configuration from source %s", source)
glog.Infof("Got new service configuration from source %s", source)
impl.NotifyServiceUpdate()
case source := <-impl.endpointsNotifyChannel:
log.Printf("Got new endpoint configuration from source %s", source)
glog.Infof("Got new endpoint configuration from source %s", source)
impl.NotifyEndpointsUpdate()
case <-time.After(1 * time.Second):
}
@@ -132,24 +132,24 @@ func (impl *ServiceConfig) ServiceChannelListener(source string, listenChannel c
case update := <-listenChannel:
switch update.Op {
case ADD:
log.Printf("Adding new service from source %s : %v", source, update.Services)
glog.Infof("Adding new service from source %s : %v", source, update.Services)
for _, value := range update.Services {
serviceMap[value.ID] = value
}
case REMOVE:
log.Printf("Removing a service %v", update)
glog.Infof("Removing a service %v", update)
for _, value := range update.Services {
delete(serviceMap, value.ID)
}
case SET:
log.Printf("Setting services %v", update)
glog.Infof("Setting services %v", update)
// Clear the old map entries by just creating a new map
serviceMap = make(map[string]api.Service)
for _, value := range update.Services {
serviceMap[value.ID] = value
}
default:
log.Printf("Received invalid update type: %v", update)
glog.Infof("Received invalid update type: %v", update)
continue
}
impl.configLock.Lock()
@@ -167,25 +167,25 @@ func (impl *ServiceConfig) EndpointsChannelListener(source string, listenChannel
case update := <-listenChannel:
switch update.Op {
case ADD:
log.Printf("Adding a new endpoint %v", update)
glog.Infof("Adding a new endpoint %v", update)
for _, value := range update.Endpoints {
endpointMap[value.Name] = value
}
case REMOVE:
log.Printf("Removing an endpoint %v", update)
glog.Infof("Removing an endpoint %v", update)
for _, value := range update.Endpoints {
delete(endpointMap, value.Name)
}

case SET:
log.Printf("Setting services %v", update)
glog.Infof("Setting services %v", update)
// Clear the old map entries by just creating a new map
endpointMap = make(map[string]api.Endpoints)
for _, value := range update.Endpoints {
endpointMap[value.Name] = value
}
default:
log.Printf("Received invalid update type: %v", update)
glog.Infof("Received invalid update type: %v", update)
continue
}
impl.configLock.Lock()
@@ -280,7 +280,7 @@ func (impl *ServiceConfig) NotifyServiceUpdate() {
}
}
impl.configLock.RUnlock()
log.Printf("Unified configuration %+v", services)
glog.Infof("Unified configuration %+v", services)
impl.handlerLock.RLock()
handlers := impl.serviceHandlers
impl.handlerLock.RUnlock()
@@ -300,7 +300,7 @@ func (impl *ServiceConfig) NotifyEndpointsUpdate() {
}
}
impl.configLock.RUnlock()
log.Printf("Unified configuration %+v", endpoints)
glog.Infof("Unified configuration %+v", endpoints)
impl.handlerLock.RLock()
handlers := impl.endpointHandlers
impl.handlerLock.RUnlock()

@@ -36,12 +36,12 @@ package config
import (
"encoding/json"
"fmt"
"log"
"strings"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
)

const RegistryRoot = "registry/services"
@@ -72,7 +72,7 @@ func (impl ConfigSourceEtcd) Run() {
if err == nil {
break
}
log.Printf("Failed to get any services: %v", err)
glog.Errorf("Failed to get any services: %v", err)
time.Sleep(2 * time.Second)
}

@@ -92,7 +92,7 @@ func (impl ConfigSourceEtcd) Run() {
for {
services, endpoints, err = impl.GetServices()
if err != nil {
log.Printf("ConfigSourceEtcd: Failed to get services: %v", err)
glog.Errorf("ConfigSourceEtcd: Failed to get services: %v", err)
} else {
if len(services) > 0 {
serviceUpdate := ServiceUpdate{Op: SET, Services: services}
@@ -112,7 +112,7 @@ func (impl ConfigSourceEtcd) Run() {
func (impl ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error) {
response, err := impl.client.Get(RegistryRoot+"/specs", true, false)
if err != nil {
log.Printf("Failed to get the key %s: %v", RegistryRoot, err)
glog.Errorf("Failed to get the key %s: %v", RegistryRoot, err)
return make([]api.Service, 0), make([]api.Endpoints, 0), err
}
if response.Node.Dir == true {
@@ -125,15 +125,15 @@ func (impl ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, erro
var svc api.Service
err = json.Unmarshal([]byte(node.Value), &svc)
if err != nil {
log.Printf("Failed to load Service: %s (%#v)", node.Value, err)
glog.Errorf("Failed to load Service: %s (%#v)", node.Value, err)
continue
}
retServices[i] = svc
endpoints, err := impl.GetEndpoints(svc.ID)
if err != nil {
log.Printf("Couldn't get endpoints for %s : %v skipping", svc.ID, err)
glog.Errorf("Couldn't get endpoints for %s : %v skipping", svc.ID, err)
}
log.Printf("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
glog.Infof("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
retEndpoints[i] = endpoints
}
return retServices, retEndpoints, err
@@ -145,7 +145,7 @@ func (impl ConfigSourceEtcd) GetEndpoints(service string) (api.Endpoints, error)
key := fmt.Sprintf(RegistryRoot + "/endpoints/" + service)
response, err := impl.client.Get(key, true, false)
if err != nil {
log.Printf("Failed to get the key: %s %v", key, err)
glog.Errorf("Failed to get the key: %s %v", key, err)
return api.Endpoints{}, err
}
// Parse all the endpoint specifications in this value.
@@ -173,7 +173,7 @@ func ParseEndpoints(jsonString string) (api.Endpoints, error) {
}

func (impl ConfigSourceEtcd) WatchForChanges() {
log.Print("Setting up a watch for new services")
glog.Info("Setting up a watch for new services")
watchChannel := make(chan *etcd.Response)
go impl.client.Watch("/registry/services/", 0, true, watchChannel, nil)
for {
@@ -183,7 +183,7 @@ func (impl ConfigSourceEtcd) WatchForChanges() {
}

func (impl ConfigSourceEtcd) ProcessChange(response *etcd.Response) {
log.Printf("Processing a change in service configuration... %s", *response)
glog.Infof("Processing a change in service configuration... %s", *response)

// If it's a new service being added (signified by a localport being added)
// then process it as such
@@ -192,11 +192,11 @@ func (impl ConfigSourceEtcd) ProcessChange(response *etcd.Response) {
} else if response.Action == "set" {
service, err := EtcdResponseToService(response)
if err != nil {
log.Printf("Failed to parse %s Port: %s", response, err)
glog.Errorf("Failed to parse %s Port: %s", response, err)
return
}

log.Printf("New service added/updated: %#v", service)
glog.Infof("New service added/updated: %#v", service)
serviceUpdate := ServiceUpdate{Op: ADD, Services: []api.Service{*service}}
impl.serviceChannel <- serviceUpdate
return
@@ -204,22 +204,22 @@ func (impl ConfigSourceEtcd) ProcessChange(response *etcd.Response) {
if response.Action == "delete" {
parts := strings.Split(response.Node.Key[1:], "/")
if len(parts) == 4 {
log.Printf("Deleting service: %s", parts[3])
glog.Infof("Deleting service: %s", parts[3])
serviceUpdate := ServiceUpdate{Op: REMOVE, Services: []api.Service{{JSONBase: api.JSONBase{ID: parts[3]}}}}
impl.serviceChannel <- serviceUpdate
return
} else {
log.Printf("Unknown service delete: %#v", parts)
glog.Infof("Unknown service delete: %#v", parts)
}
}
}

func (impl ConfigSourceEtcd) ProcessEndpointResponse(response *etcd.Response) {
log.Printf("Processing a change in endpoint configuration... %s", *response)
glog.Infof("Processing a change in endpoint configuration... %s", *response)
var endpoints api.Endpoints
err := json.Unmarshal([]byte(response.Node.Value), &endpoints)
if err != nil {
log.Printf("Failed to parse service out of etcd key: %v : %+v", response.Node.Value, err)
glog.Errorf("Failed to parse service out of etcd key: %v : %+v", response.Node.Value, err)
return
}
endpointsUpdate := EndpointsUpdate{Op: ADD, Endpoints: []api.Endpoints{endpoints}}

@@ -34,11 +34,11 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"reflect"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/golang/glog"
)

// TODO: kill this struct.
@@ -68,7 +68,7 @@ func NewConfigSourceFile(filename string, serviceChannel chan ServiceUpdate, end
}

func (impl ConfigSourceFile) Run() {
log.Printf("Watching file %s", impl.filename)
glog.Infof("Watching file %s", impl.filename)
var lastData []byte
var lastServices []api.Service
var lastEndpoints []api.Endpoints
@@ -76,12 +76,12 @@ func (impl ConfigSourceFile) Run() {
for {
data, err := ioutil.ReadFile(impl.filename)
if err != nil {
log.Printf("Couldn't read file: %s : %v", impl.filename, err)
glog.Errorf("Couldn't read file: %s : %v", impl.filename, err)
} else {
var config ConfigFile
err = json.Unmarshal(data, &config)
if err != nil {
log.Printf("Couldn't unmarshal configuration from file : %s %v", data, err)
glog.Errorf("Couldn't unmarshal configuration from file : %s %v", data, err)
} else {
if !bytes.Equal(lastData, data) {
lastData = data

@@ -19,11 +19,11 @@ package proxy
import (
"fmt"
"io"
"log"
"net"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/golang/glog"
)

// Proxier is a simple proxy for tcp connections between a localhost:lport and services that provide
@@ -38,11 +38,11 @@ func NewProxier(loadBalancer LoadBalancer) *Proxier {
}

func CopyBytes(in, out *net.TCPConn) {
log.Printf("Copying from %v <-> %v <-> %v <-> %v",
glog.Infof("Copying from %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
_, err := io.Copy(in, out)
if err != nil && err != io.EOF {
log.Printf("I/O error: %v", err)
glog.Errorf("I/O error: %v", err)
}

in.CloseRead()
@@ -51,7 +51,7 @@ func CopyBytes(in, out *net.TCPConn) {

// Create a bidirectional byte shuffler. Copies bytes to/from each connection.
func ProxyConnection(in, out *net.TCPConn) {
log.Printf("Creating proxy between %v <-> %v <-> %v <-> %v",
glog.Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go CopyBytes(in, out)
go CopyBytes(out, in)
@@ -61,25 +61,25 @@ func (proxier Proxier) AcceptHandler(service string, listener net.Listener) {
for {
inConn, err := listener.Accept()
if err != nil {
log.Printf("Accept failed: %v", err)
glog.Errorf("Accept failed: %v", err)
continue
}
log.Printf("Accepted connection from: %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
glog.Infof("Accepted connection from: %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())

// Figure out where this request should go.
endpoint, err := proxier.loadBalancer.LoadBalance(service, inConn.RemoteAddr())
if err != nil {
log.Printf("Couldn't find an endpoint for %s %v", service, err)
glog.Errorf("Couldn't find an endpoint for %s %v", service, err)
inConn.Close()
continue
}

log.Printf("Mapped service %s to endpoint %s", service, endpoint)
glog.Infof("Mapped service %s to endpoint %s", service, endpoint)
outConn, err := net.DialTimeout("tcp", endpoint, time.Duration(5)*time.Second)
// We basically need to take everything from inConn and send to outConn
// and anything coming from outConn needs to be sent to inConn.
if err != nil {
log.Printf("Dial failed: %v", err)
glog.Errorf("Dial failed: %v", err)
inConn.Close()
continue
}
@@ -112,22 +112,22 @@ func (proxier Proxier) addServiceOnUnusedPort(service string) (string, error) {
}

func (proxier Proxier) addServiceCommon(service string, l net.Listener) {
log.Printf("Listening for %s on %s", service, l.Addr().String())
glog.Infof("Listening for %s on %s", service, l.Addr().String())
// If that succeeds, start the accepting loop.
go proxier.AcceptHandler(service, l)
}

func (proxier Proxier) OnUpdate(services []api.Service) {
log.Printf("Received update notice: %+v", services)
glog.Infof("Received update notice: %+v", services)
for _, service := range services {
port, exists := proxier.serviceMap[service.ID]
if !exists || port != service.Port {
log.Printf("Adding a new service %s on port %d", service.ID, service.Port)
glog.Infof("Adding a new service %s on port %d", service.ID, service.Port)
err := proxier.AddService(service.ID, service.Port)
if err == nil {
proxier.serviceMap[service.ID] = service.Port
} else {
log.Printf("Failed to start listening for %s on %d", service.ID, service.Port)
glog.Infof("Failed to start listening for %s on %d", service.ID, service.Port)
}
}
}

@@ -20,7 +20,6 @@ package proxy

import (
"errors"
"log"
"net"
"reflect"
"strconv"
@@ -28,6 +27,7 @@ import (
"sync"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/golang/glog"
)

type LoadBalancerRR struct {
@@ -86,7 +86,7 @@ func (impl LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
for _, value := range endpoints {
existingEndpoints, exists := impl.endpointsMap[value.Name]
if !exists || !reflect.DeepEqual(value.Endpoints, existingEndpoints) {
log.Printf("LoadBalancerRR: Setting endpoints for %s to %+v", value.Name, value.Endpoints)
glog.Infof("LoadBalancerRR: Setting endpoints for %s to %+v", value.Name, value.Endpoints)
impl.endpointsMap[value.Name] = impl.FilterValidEndpoints(value.Endpoints)
// Start RR from the beginning if added or updated.
impl.rrIndex[value.Name] = 0
@@ -97,7 +97,7 @@ func (impl LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
for key, value := range impl.endpointsMap {
_, exists := tmp[key]
if !exists {
log.Printf("LoadBalancerRR: Removing endpoints for %s -> %+v", key, value)
glog.Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", key, value)
delete(impl.endpointsMap, key)
}
}

@@ -18,10 +18,10 @@ package registry

import (
"fmt"
"log"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
)

func MakeEndpointController(serviceRegistry ServiceRegistry, podRegistry PodRegistry) *EndpointController {
@@ -45,7 +45,7 @@ func (e *EndpointController) SyncServiceEndpoints() error {
for _, service := range services.Items {
pods, err := e.podRegistry.ListPods(labels.Set(service.Selector).AsSelector())
if err != nil {
log.Printf("Error syncing service: %#v, skipping.", service)
glog.Errorf("Error syncing service: %#v, skipping.", service)
resultErr = err
continue
}
@@ -59,7 +59,7 @@ func (e *EndpointController) SyncServiceEndpoints() error {
Endpoints: endpoints,
})
if err != nil {
log.Printf("Error updating endpoints: %#v", err)
glog.Errorf("Error updating endpoints: %#v", err)
continue
}
}

@@ -19,11 +19,11 @@ package registry
import (
"encoding/json"
"fmt"
"log"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

// TODO: Need to add a reconciler loop that makes sure that things in pods are reflected into
@@ -158,7 +158,7 @@ func (registry *EtcdRegistry) deletePodFromMachine(machine, podID string) error
// This really shouldn't happen, it indicates something is broken, and likely
// there is a lost pod somewhere.
// However it is "deleted" so log it and move on
log.Printf("Couldn't find: %s in %#v", podID, manifests)
glog.Infof("Couldn't find: %s in %#v", podID, manifests)
}
if err = registry.updateManifests(machine, newManifests); err != nil {
return err

@@ -18,7 +18,6 @@ package registry

import (
"fmt"
"log"
"strings"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -26,6 +25,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
)

// PodRegistryStorage implements the RESTStorage interface in terms of a PodRegistry
@@ -65,7 +65,7 @@ func (storage *PodRegistryStorage) List(selector labels.Selector) (interface{},
for ix, pod := range pods {
info, err := storage.podCache.GetContainerInfo(pod.CurrentState.Host, pod.ID)
if err != nil {
log.Printf("Error getting container info: %#v", err)
glog.Errorf("Error getting container info: %#v", err)
continue
}
result.Items[ix].CurrentState.Info = info
@@ -103,7 +103,7 @@ func getInstanceIP(cloud cloudprovider.Interface, host string) string {
}
addr, err := instances.IPAddress(host)
if err != nil {
log.Printf("Error getting instance IP: %#v", err)
glog.Errorf("Error getting instance IP: %#v", err)
return ""
}
return addr.String()

pkg/util/logs.go (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright 2014 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"flag"
"log"
"time"

"github.com/golang/glog"
)

var logFlushFreq = flag.Duration("log_flush_frequency", 5*time.Second, "Maximum number of seconds between log flushes")

// This serves as a bridge between the standard log package and the glog package.
type GlogWriter struct{}

func (writer GlogWriter) Write(data []byte) (n int, err error) {
glog.Info(string(data))
return len(data), nil
}

func InitLogs() {
log.SetOutput(GlogWriter{})
log.SetFlags(0)
// The default glog flush interval is 30 seconds, which is frighteningly long.
go Forever(glog.Flush, *logFlushFreq)
}

func FlushLogs() {
glog.Flush()
}

func NewLogger(prefix string) *log.Logger {
return log.New(GlogWriter{}, prefix, 0)
}

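To make the effect of this bridge concrete, here is a minimal, hypothetical usage sketch (not part of the commit; the import path matches the tree at the time): once InitLogs has run, anything still written through the standard log package is redirected into glog's Info stream, and FlushLogs forces a final flush on exit.

package main

import (
    "log"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/golang/glog"
)

func main() {
    util.InitLogs()
    defer util.FlushLogs()

    // Legacy or third-party code that still uses the stdlib logger...
    log.Printf("hello from the standard log package")
    // ...now passes through GlogWriter and ends up in glog's Info output,
    // next to direct glog calls like this one.
    glog.Info("hello from glog")
}
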
@@ -19,9 +19,10 @@ package util
import (
"encoding/json"
"fmt"
"log"
"runtime"
"time"

"github.com/golang/glog"
)

// Simply catches a crash and logs an error. Meant to be called via defer.
@@ -36,7 +37,7 @@ func HandleCrash() {
}
callers = callers + fmt.Sprintf("%v:%v\n", file, line)
}
log.Printf("Recovered from panic: %#v (%v)\n%v", r, r, callers)
glog.Infof("Recovered from panic: %#v (%v)\n%v", r, r, callers)
}
}

third_party/deps.sh (vendored, +1)
@@ -1,6 +1,7 @@
TOP_PACKAGES="
github.com/coreos/go-etcd/etcd
github.com/fsouza/go-dockerclient
github.com/golang/glog
code.google.com/p/goauth2/compute/serviceaccount
code.google.com/p/goauth2/oauth
code.google.com/p/google-api-go-client/compute/v1

third_party/update.sh (vendored, +4)
@@ -13,6 +13,10 @@ cd $THIRD_PARTY_DIR

. ./deps.sh

if [ $# -gt 0 ]; then
PACKAGES="$@"
fi

# Create a temp GOPATH root. It must be an absolute path
mkdir -p ../output/go_dep_update
cd ../output/go_dep_update

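The update.sh hunk makes the dependency refresh usable per package: with no arguments the script still walks everything in deps.sh, and with arguments it updates only the named packages. A hypothetical invocation (paths assume the repository layout of the time) would be:

# Refresh everything listed in deps.sh (unchanged default behavior).
third_party/update.sh

# Refresh only glog, which is what this change needed.
third_party/update.sh github.com/golang/glog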