Merge branch 'master' into upgrade_aliases_branch

Jing Ai 2017-12-01 10:34:36 -08:00
commit 9449bd2760
20 changed files with 193 additions and 44 deletions

View File

@ -1,5 +1,4 @@
# Kubernetes Dashboard
-==============
Kubernetes Dashboard is a general purpose, web-based UI for Kubernetes clusters.
It allows users to manage applications running in the cluster, troubleshoot them,

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
k8s-app: kubernetes-dashboard
# Allows editing resource and makes sure it is created first.
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-settings
namespace: kube-system

View File

@ -1,4 +1,13 @@
-apiVersion: extensions/v1beta1
+apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: kubernetes-dashboard
@ -20,9 +29,8 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
-image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
+image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
resources:
-# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
@ -30,13 +38,29 @@ spec:
cpu: 100m
memory: 100Mi
ports:
-- containerPort: 9090
+- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- name: tmp-volume
mountPath: /tmp
livenessProbe:
httpGet:
scheme: HTTPS
path: /
-port: 9090
+port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"

View File

@ -0,0 +1,45 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
# Allows editing resource and makes sure it is created first.
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque

View File

@ -11,5 +11,5 @@ spec:
selector:
k8s-app: kubernetes-dashboard
ports:
-- port: 80
-  targetPort: 9090
+- port: 443
+  targetPort: 8443

View File

@ -36,7 +36,7 @@ spec:
hostPath:
path: /dev
containers:
- image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:943a62949cd80c26e7371d4e123dac61b4cc7281390721aaa95f265171094842"
- image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:5e3837c3ab99e90d4c19053998ad86239591de4264bc177faad75642b64b723d"
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:

View File

@ -45,19 +45,13 @@ function deploy_dns {
}
function deploy_dashboard {
-if ${KUBECTL} get rc -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
-echo "Kubernetes Dashboard replicationController already exists"
-else
-echo "Creating Kubernetes Dashboard replicationController"
-${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-fi
echo "Deploying Kubernetes Dashboard"
-if ${KUBECTL} get service/kubernetes-dashboard --namespace=kube-system &> /dev/null; then
-echo "Kubernetes Dashboard service already exists"
-else
-echo "Creating Kubernetes Dashboard service"
-${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
-fi
${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
echo
}

View File

@ -25,7 +25,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-alpha1",
"image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-beta1",
"livenessProbe": {
"httpGet": {
"path": "/health-check",

View File

@ -792,8 +792,11 @@ function start_kubedashboard {
if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
echo "Creating kubernetes-dashboard"
# use kubectl to create the dashboard
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
echo "kubernetes-dashboard deployment and service successfully deployed."
fi
}

View File

@ -1662,7 +1662,7 @@ type awsDisk struct {
}
func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
-awsID, err := name.mapToAWSVolumeID()
+awsID, err := name.MapToAWSVolumeID()
if err != nil {
return nil, err
}
@ -2022,7 +2022,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
// DetachDisk implements Volumes.DetachDisk
func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
if diskInfo == nil {
return "", err
}
@ -2320,7 +2319,6 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
if diskInfo == nil {
return true, err
}
@ -2378,7 +2376,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume
idToDiskName := make(map[awsVolumeID]KubernetesVolumeID)
for _, diskName := range diskNames {
-volumeID, err := diskName.mapToAWSVolumeID()
+volumeID, err := diskName.MapToAWSVolumeID()
if err != nil {
return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
}

View File

@ -59,8 +59,8 @@ type diskInfo struct {
disk *awsDisk
}
-// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
-func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
+// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
+func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
// name looks like aws://availability-zone/awsVolumeId
// The original idea of the URL-style name was to put the AZ into the
@ -101,7 +101,7 @@ func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
func GetAWSVolumeID(kubeVolumeID string) (string, error) {
kid := KubernetesVolumeID(kubeVolumeID)
-awsID, err := kid.mapToAWSVolumeID()
+awsID, err := kid.MapToAWSVolumeID()
return string(awsID), err
}
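The name format documented above is a URL-style string. As a hedged, standalone sketch of the mapping this method performs (illustrative only — the real MapToAWSVolumeID handles more variants, including bare volume IDs):

```go
package main

import (
	"fmt"
	"strings"
)

// parseAWSVolumeID is an illustrative stand-in for MapToAWSVolumeID:
// it accepts names shaped like aws://availability-zone/awsVolumeId
// (the zone segment may be empty) and returns the raw vol- ID.
func parseAWSVolumeID(name string) (string, error) {
	s := strings.TrimPrefix(name, "aws://")
	// Keep only the last path segment, dropping the availability zone.
	if i := strings.LastIndex(s, "/"); i >= 0 {
		s = s[i+1:]
	}
	if !strings.HasPrefix(s, "vol-") {
		return "", fmt.Errorf("invalid format for AWS volume %q", name)
	}
	return s, nil
}

func main() {
	id, err := parseAWSVolumeID("aws://us-east-1a/vol-0fab1d5e3f72a5e23")
	fmt.Println(id, err) // vol-0fab1d5e3f72a5e23 <nil>
}
```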

View File

@ -169,12 +169,13 @@ func findMatchingVolume(
continue
}
nodeAffinityValid := true
if node != nil {
// Scheduler path, check that the PV NodeAffinity
// is satisfied by the node
err := volumeutil.CheckNodeAffinity(volume, node.Labels)
if err != nil {
-continue
+nodeAffinityValid = false
}
}
@ -185,6 +186,14 @@ func findMatchingVolume(
if volumeQty.Cmp(requestedQty) < 0 {
continue
}
// If PV node affinity is invalid, return no match.
// This means the prebound PV (and therefore PVC)
// is not suitable for this node.
if !nodeAffinityValid {
return nil, nil
}
return volume, nil
}
@ -199,6 +208,7 @@ func findMatchingVolume(
// - volumes bound to another claim
// - volumes whose labels don't match the claim's selector, if specified
// - volumes in Class that is not requested
// - volumes whose NodeAffinity does not match the node
if volume.Spec.ClaimRef != nil {
continue
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
@ -207,6 +217,9 @@ func findMatchingVolume(
if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
continue
}
if !nodeAffinityValid {
continue
}
if node != nil {
// Scheduler path
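To see why the nodeAffinityValid flag replaces the immediate continue, here is a minimal control-flow sketch with toy types (not the real findMatchingVolume signature): a pre-bound PV that fails node affinity must fail the whole match, while an unbound PV is merely skipped as a candidate.

```go
package main

import "fmt"

// Toy stand-in for a PV: just the two facts the control flow needs.
type pv struct {
	name          string
	preBound      bool // plays the role of Spec.ClaimRef pointing at our claim
	affinityMatch bool // outcome of the CheckNodeAffinity call
}

func pickVolume(volumes []pv) *pv {
	for i := range volumes {
		v := &volumes[i]
		nodeAffinityValid := v.affinityMatch
		if v.preBound {
			if !nodeAffinityValid {
				// Prebound PV (and therefore PVC) is unusable on this node:
				// no match for the whole search.
				return nil
			}
			return v
		}
		if !nodeAffinityValid {
			continue // unbound PV: just not a candidate on this node
		}
		return v
	}
	return nil
}

func main() {
	fmt.Println(pickVolume([]pv{{name: "pv1", preBound: true, affinityMatch: false}}))
}
```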

View File

@ -1218,7 +1218,7 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
pvc.Spec.StorageClassName = &classWait
pvc.Name = "claim02"
}),
-node: node2,
+node: node3,
},
"success-bad-and-good-node-affinity": {
expectedMatch: "affinity-pv3",

View File

@ -22,7 +22,7 @@ import (
"k8s.io/apiserver/pkg/server/mux"
)
-const dashboardPath = "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy"
+const dashboardPath = "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
// UIRedirect redirects /ui to the kube-ui proxy path.
type UIRedirect struct{}
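The new path uses the apiserver's scheme:name:port service-proxy syntax, where the trailing colon means "default port". utilnet.JoinSchemeNamePort from k8s.io/apimachinery (the same helper the e2e test below switches to) produces that segment — a small sketch, assuming that module is on the import path:

```go
package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// scheme:name:port — an empty port keeps the trailing colon,
	// which the apiserver proxy reads as "use the default port".
	seg := utilnet.JoinSchemeNamePort("https", "kubernetes-dashboard", "")
	fmt.Println(seg) // https:kubernetes-dashboard:
	fmt.Println("/api/v1/namespaces/kube-system/services/" + seg + "/proxy/")
}
```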

View File

@ -169,7 +169,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
case <-ticker.C:
glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
if devicePath != "" {
-devicePaths := getDiskByIdPaths(partition, devicePath)
+devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
path, err := verifyDevicePath(devicePaths)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321

View File

@ -18,6 +18,8 @@ package aws_ebs
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -178,7 +180,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
// Returns list of all paths for given EBS mount
// This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
// Here it is mostly about applying the partition path
-func getDiskByIdPaths(partition string, devicePath string) []string {
+func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
devicePaths := []string{}
if devicePath != "" {
devicePaths = append(devicePaths, devicePath)
@ -190,6 +192,23 @@ func getDiskByIdPaths(partition string, devicePath string) []string {
}
}
// We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"),
// and we have to get the volume id from the nvme interface
awsVolumeID, err := volumeID.MapToAWSVolumeID()
if err != nil {
glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
} else {
// This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
// For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
nvmePath, err := findNvmeVolume(nvmeName)
if err != nil {
glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
} else if nvmePath != "" {
devicePaths = append(devicePaths, nvmePath)
}
}
return devicePaths
}
@ -202,3 +221,35 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error)
return awsCloudProvider, nil
}
// findNvmeVolume looks for the nvme volume with the specified name
// It follows the symlink (if it exists) and returns the absolute path to the device
func findNvmeVolume(findName string) (device string, err error) {
p := filepath.Join("/dev/disk/by-id/", findName)
stat, err := os.Lstat(p)
if err != nil {
if os.IsNotExist(err) {
glog.V(6).Infof("nvme path not found %q", p)
return "", nil
}
return "", fmt.Errorf("error getting stat of %q: %v", p, err)
}
if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
glog.Warningf("nvme file %q found, but was not a symlink", p)
return "", nil
}
// Find the target, resolving to an absolute path
// For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
resolved, err := filepath.EvalSymlinks(p)
if err != nil {
return "", fmt.Errorf("error reading target of symlink %q: %v", p, err)
}
if !strings.HasPrefix(resolved, "/dev") {
return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved)
}
return resolved, nil
}
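To make the by-id naming concrete, here is the transformation applied above, reduced to a runnable sketch with an illustrative volume ID:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	awsVolumeID := "vol-0fab1d5e3f72a5e23"
	// AWS drops the dash from the volume ID in the device name it
	// exposes under /dev/disk/by-id/.
	nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(awsVolumeID, "-", "", -1)
	fmt.Println(filepath.Join("/dev/disk/by-id/", nvmeName))
	// Output: /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
}
```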

View File

@ -1475,12 +1475,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
failReasons := []algorithm.PredicateFailureReason{}
if !boundSatisfied {
glog.V(5).Info("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
failReasons = append(failReasons, ErrVolumeNodeConflict)
}
if !unboundSatisfied {
glog.V(5).Info("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
failReasons = append(failReasons, ErrVolumeBindConflict)
}
@ -1489,6 +1489,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
}
// All volumes bound or matching PVs found for all unbound PVCs
glog.V(5).Info("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
return true, nil, nil
}
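The Info → Infof change matters because glog's Info handles arguments in the manner of fmt.Sprint, while Infof formats like fmt.Sprintf — with Info, the format verbs are printed literally and the arguments are simply concatenated after them. A sketch using fmt directly:

```go
package main

import "fmt"

func main() {
	ns, name, node := "default", "web-0", "node-1"
	// What glog.V(5).Info produced: verbs are not expanded.
	fmt.Println(fmt.Sprint("pod %v/%v, node %q", ns, name, node))
	// What glog.V(5).Infof produces: verbs are expanded.
	fmt.Println(fmt.Sprintf("pod %v/%v, node %q", ns, name, node))
}
```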

View File

@ -15,6 +15,7 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

View File

@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@ -36,6 +37,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
uiServiceName = "kubernetes-dashboard"
uiAppName = uiServiceName
uiNamespace = metav1.NamespaceSystem
uiRedirect = "/ui"
serverStartTimeout = 1 * time.Minute
)
@ -63,20 +65,20 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
-// Query against the proxy URL for the kube-ui service.
+// Query against the proxy URL for the kubernetes-dashboard service.
err := proxyRequest.Namespace(uiNamespace).
Context(ctx).
-Name(uiServiceName).
+Name(utilnet.JoinSchemeNamePort("https", uiServiceName, "")).
Timeout(framework.SingleCallTimeout).
Do().
StatusCode(&status).
Error()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Request to kube-ui failed: %v", err)
framework.Failf("Request to kubernetes-dashboard failed: %v", err)
return true, err
}
framework.Logf("Request to kube-ui failed: %v", err)
framework.Logf("Request to kubernetes-dashboard failed: %v", err)
} else if status != http.StatusOK {
framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
}
@ -88,7 +90,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
var status int
err = f.ClientSet.CoreV1().RESTClient().Get().
AbsPath("/ui").
AbsPath(uiRedirect).
Timeout(framework.SingleCallTimeout).
Do().
StatusCode(&status).