Merge pull request #42630 from anguslees/elasticsearch

Automatic merge from submit-queue (batch tested with PRs 41830, 42630)

Arrange for elasticsearch to shut down cleanly

Kubernetes initiates "graceful shutdown" by sending SIGTERM to pid 1, which
is exactly what elasticsearch is expecting (good!).

The way the existing startup scripts worked, however, this signal arrived at
the shell wrapper rather than at elasticsearch itself, and the shell wrapper
exited, killing the container immediately (bad!).
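In short, the fix is to `exec` the JVM into place instead of forking it
behind shell wrappers, so that it ends up as pid 1 and receives the signal
directly. A minimal sketch of the two invocation styles, taken from this
image's run.sh (before and after):

```sh
# Before: su forks an intermediate `sh -c`, so the SIGTERM sent to pid 1
# never reaches the JVM; the outer shell just dies and takes the
# container down with it.
/bin/su -c /elasticsearch/bin/elasticsearch elasticsearch

# After: `exec` replaces the wrapper shell with gosu, and gosu drops
# privileges and exec()s the JVM in turn, so java becomes pid 1 and
# handles SIGTERM itself.
exec gosu elasticsearch /elasticsearch/bin/elasticsearch
```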

Before this change:
```
    1 ?        Ss     0:00 /bin/sh -c /run.sh
    6 ?        S      0:00 /bin/bash /run.sh
   13 ?        S      0:00  \_ /bin/su -c /elasticsearch/bin/elasticsearch elasticsearch
   14 ?        Ss     0:00      \_ sh -c /elasticsearch/bin/elasticsearch
   15 ?        Sl    19:18          \_ /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java ... org.elasticsearch.bootstrap.Elasticsearch start
```
After this change:
```
    1 ?        Ssl    0:29 /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java ... org.elasticsearch.bootstrap.Elasticsearch start
```
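The listings above are `ps` output from inside the container; with a running
pod the new behaviour can be checked with something like the following (the
pod name here is hypothetical, and this assumes `ps` is available in the
image):

```sh
# Hypothetical pod name; the forest view should show the JVM alone at pid 1.
kubectl exec elasticsearch-logging-v1-abcde -- ps axf
```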
Merged by: Kubernetes Submit Queue, 2017-03-10 16:21:20 -08:00 (committed by GitHub)
commit 17793bccb9
5 changed files with 9 additions and 9 deletions

Controller manifest (elasticsearch-logging):

```diff
@@ -21,7 +21,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:v2.4.1-1
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1-2
         name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class
```

Image Dockerfile:

```diff
@@ -23,7 +23,7 @@ ENV DEBIAN_FRONTEND noninteractive
 ENV ELASTICSEARCH_VERSION 2.4.1
 
 RUN apt-get update \
-    && apt-get install -y curl \
+    && apt-get install -y curl gosu \
    && apt-get clean
 
 RUN set -x \
@@ -48,4 +48,4 @@ RUN useradd --no-create-home --user-group elasticsearch \
 VOLUME ["/data"]
 EXPOSE 9200 9300
 
-CMD /run.sh
+CMD ["/run.sh"]
```

Image Makefile:

```diff
@@ -16,7 +16,7 @@
 
 # The current value of the tag to be used for building and
 # pushing an image to gcr.io
-TAG = v2.4.1-1
+TAG = v2.4.1-2
 
 build: elasticsearch_logging_discovery
 	docker build --pull -t gcr.io/google_containers/elasticsearch:$(TAG) .
```

elasticsearch_logging_discovery (Go source):

```diff
@@ -24,7 +24,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -56,7 +56,7 @@ func main() {
 	namespace := metav1.NamespaceSystem
 	envNamespace := os.Getenv("NAMESPACE")
 	if envNamespace != "" {
-		if _, err := client.Core().Namespaces().Get(envNamespace, meta_v1.GetOptions{}); err != nil {
+		if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
 			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
 		}
 		namespace = envNamespace
@@ -66,7 +66,7 @@ func main() {
 	// Look for endpoints associated with the Elasticsearch loggging service.
 	// First wait for the service to become available.
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging", meta_v1.GetOptions{})
+		elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging", metav1.GetOptions{})
 		if err == nil {
 			break
 		}
@@ -83,7 +83,7 @@ func main() {
 	// Wait for some endpoints.
 	count := 0
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging", meta_v1.GetOptions{})
+		endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging", metav1.GetOptions{})
 		if err != nil {
 			continue
 		}
```

run.sh:

```diff
@@ -25,4 +25,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
 
 chown -R elasticsearch:elasticsearch /data
 
-/bin/su -c /elasticsearch/bin/elasticsearch elasticsearch
+exec gosu elasticsearch /elasticsearch/bin/elasticsearch
```
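With the JVM as pid 1, `docker stop` (which sends SIGTERM and waits 10
seconds before resorting to SIGKILL) should now produce a clean shutdown. A
quick local sanity check, using the image tag from the Makefile above (the
container name is arbitrary, and the single node may not form a full
cluster, which is fine for this test):

```sh
# Start the image, ask it to stop, and look for Elasticsearch's own
# shutdown messages rather than an abruptly truncated log.
docker run -d --name es-shutdown-test gcr.io/google_containers/elasticsearch:v2.4.1-2
docker stop es-shutdown-test
docker logs es-shutdown-test | tail
```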