Merge branch 'master' into upgrade_aliases_branch

Jing Ai 2017-12-01 10:34:36 -08:00
commit 9449bd2760
20 changed files with 193 additions and 44 deletions

View File

@@ -1,5 +1,4 @@
 # Kubernetes Dashboard
-==============
 Kubernetes Dashboard is a general purpose, web-based UI for Kubernetes clusters.
 It allows users to manage applications running in the cluster, troubleshoot them,

View File

@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    # Allows editing resource and makes sure it is created first.
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: kubernetes-dashboard-settings
+  namespace: kube-system

View File

@@ -1,4 +1,13 @@
-apiVersion: extensions/v1beta1
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: kubernetes-dashboard
+  namespace: kube-system
+---
+apiVersion: apps/v1beta2
 kind: Deployment
 metadata:
   name: kubernetes-dashboard
@@ -20,9 +29,8 @@ spec:
     spec:
       containers:
       - name: kubernetes-dashboard
-        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
+        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
         resources:
-          # keep request = limit to keep this container in guaranteed class
           limits:
             cpu: 100m
             memory: 300Mi
@@ -30,13 +38,29 @@ spec:
             cpu: 100m
             memory: 100Mi
         ports:
-        - containerPort: 9090
+        - containerPort: 8443
+          protocol: TCP
+        args:
+          - --auto-generate-certificates
+        volumeMounts:
+        - name: kubernetes-dashboard-certs
+          mountPath: /certs
+        - name: tmp-volume
+          mountPath: /tmp
         livenessProbe:
           httpGet:
+            scheme: HTTPS
             path: /
-            port: 9090
+            port: 8443
           initialDelaySeconds: 30
           timeoutSeconds: 30
+      volumes:
+      - name: kubernetes-dashboard-certs
+        secret:
+          secretName: kubernetes-dashboard-certs
+      - name: tmp-volume
+        emptyDir: {}
+      serviceAccountName: kubernetes-dashboard
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"

View File

@@ -0,0 +1,45 @@
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: kubernetes-dashboard-minimal
+  namespace: kube-system
+rules:
+  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
+- apiGroups: [""]
+  resources: ["secrets"]
+  verbs: ["create"]
+  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+- apiGroups: [""]
+  resources: ["secrets"]
+  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
+  verbs: ["get", "update", "delete"]
+  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+- apiGroups: [""]
+  resources: ["configmaps"]
+  resourceNames: ["kubernetes-dashboard-settings"]
+  verbs: ["get", "update"]
+  # Allow Dashboard to get metrics from heapster.
+- apiGroups: [""]
+  resources: ["services"]
+  resourceNames: ["heapster"]
+  verbs: ["proxy"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: kubernetes-dashboard-minimal
+  namespace: kube-system
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubernetes-dashboard-minimal
+subjects:
+- kind: ServiceAccount
+  name: kubernetes-dashboard
+  namespace: kube-system

View File

@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    # Allows editing resource and makes sure it is created first.
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: kubernetes-dashboard-certs
+  namespace: kube-system
+type: Opaque

View File

@@ -11,5 +11,5 @@ spec:
   selector:
     k8s-app: kubernetes-dashboard
   ports:
-  - port: 80
-    targetPort: 9090
+  - port: 443
+    targetPort: 8443

View File

@@ -36,7 +36,7 @@ spec:
         hostPath:
           path: /dev
       containers:
-      - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:943a62949cd80c26e7371d4e123dac61b4cc7281390721aaa95f265171094842"
+      - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:5e3837c3ab99e90d4c19053998ad86239591de4264bc177faad75642b64b723d"
        command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
        name: nvidia-gpu-device-plugin
        resources:

View File

@@ -45,19 +45,13 @@ function deploy_dns {
 }
 
 function deploy_dashboard {
-  if ${KUBECTL} get rc -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
-    echo "Kubernetes Dashboard replicationController already exists"
-  else
-    echo "Creating Kubernetes Dashboard replicationController"
-    ${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-  fi
-  if ${KUBECTL} get service/kubernetes-dashboard --namespace=kube-system &> /dev/null; then
-    echo "Kubernetes Dashboard service already exists"
-  else
-    echo "Creating Kubernetes Dashboard service"
-    ${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
-  fi
+  echo "Deploying Kubernetes Dashboard"
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
   echo
 }

View File

@@ -25,7 +25,7 @@
         "containers": [
           {
             "name": "cluster-autoscaler",
-            "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-alpha1",
+            "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-beta1",
             "livenessProbe": {
               "httpGet": {
                 "path": "/health-check",

View File

@@ -792,8 +792,11 @@ function start_kubedashboard {
     if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
         echo "Creating kubernetes-dashboard"
         # use kubectl to create the dashboard
-        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
        echo "kubernetes-dashboard deployment and service successfully deployed."
    fi
}

View File

@@ -1662,7 +1662,7 @@ type awsDisk struct {
 }
 
 func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
-	awsID, err := name.mapToAWSVolumeID()
+	awsID, err := name.MapToAWSVolumeID()
 	if err != nil {
 		return nil, err
 	}
@@ -2022,7 +2022,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
 // DetachDisk implements Volumes.DetachDisk
 func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-
 	if diskInfo == nil {
 		return "", err
 	}
@@ -2320,7 +2319,6 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
 // DiskIsAttached implements Volumes.DiskIsAttached
 func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
 	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-
 	if diskInfo == nil {
 		return true, err
 	}
@@ -2378,7 +2376,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume
 	idToDiskName := make(map[awsVolumeID]KubernetesVolumeID)
 	for _, diskName := range diskNames {
-		volumeID, err := diskName.mapToAWSVolumeID()
+		volumeID, err := diskName.MapToAWSVolumeID()
 		if err != nil {
 			return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
 		}

View File

@@ -59,8 +59,8 @@ type diskInfo struct {
 	disk *awsDisk
 }
 
-// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
-func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
+// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
+func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
 	// name looks like aws://availability-zone/awsVolumeId
 	// The original idea of the URL-style name was to put the AZ into the
@@ -101,7 +101,7 @@ func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
 
 func GetAWSVolumeID(kubeVolumeID string) (string, error) {
 	kid := KubernetesVolumeID(kubeVolumeID)
-	awsID, err := kid.mapToAWSVolumeID()
+	awsID, err := kid.MapToAWSVolumeID()
 	return string(awsID), err
 }
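The only change in this file is the capitalization of the method name, but it is what enables the NVMe lookup in the EBS volume plugin below: Go exports an identifier outside its package only when the name begins with an upper-case letter, and getDiskByIdPaths lives in a different package (pkg/volume/aws_ebs) that now needs to call it. A minimal sketch of that visibility rule, with an illustrative package name rather than the real layout:

// Hypothetical package, shown only to illustrate Go's export rule.
package awsvolumes

// KubernetesVolumeID stands in for the real named string type.
type KubernetesVolumeID string

// MapToAWSVolumeID starts with an upper-case letter, so any package that
// imports this one (for example a volume plugin) may call it.
func (id KubernetesVolumeID) MapToAWSVolumeID() (string, error) {
	return string(id), nil // real parsing elided
}

// mapToAWSVolumeID (lower-case) would be callable only inside this package,
// which is exactly why the method had to be renamed.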

View File

@@ -169,12 +169,13 @@ func findMatchingVolume(
 			continue
 		}
 
+		nodeAffinityValid := true
 		if node != nil {
 			// Scheduler path, check that the PV NodeAffinity
 			// is satisfied by the node
 			err := volumeutil.CheckNodeAffinity(volume, node.Labels)
 			if err != nil {
-				continue
+				nodeAffinityValid = false
 			}
 		}
@@ -185,6 +186,14 @@ func findMatchingVolume(
 			if volumeQty.Cmp(requestedQty) < 0 {
 				continue
 			}
+
+			// If PV node affinity is invalid, return no match.
+			// This means the prebound PV (and therefore PVC)
+			// is not suitable for this node.
+			if !nodeAffinityValid {
+				return nil, nil
+			}
+
 			return volume, nil
 		}
@@ -199,6 +208,7 @@ func findMatchingVolume(
 		// - volumes bound to another claim
 		// - volumes whose labels don't match the claim's selector, if specified
 		// - volumes in Class that is not requested
+		// - volumes whose NodeAffinity does not match the node
 		if volume.Spec.ClaimRef != nil {
 			continue
 		} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
@@ -207,6 +217,9 @@ func findMatchingVolume(
 		if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
 			continue
 		}
+		if !nodeAffinityValid {
+			continue
+		}
 
 		if node != nil {
 			// Scheduler path
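The point of the new nodeAffinityValid flag is ordering: the affinity check still runs up front, but its result is applied only after the volume has been classified, so a PV that is already pre-bound to the claim and fails node affinity now returns an explicit "no match" instead of being silently skipped, while unbound candidates are simply passed over. A compressed, self-contained sketch of that control flow (the types and checks are stand-ins, not the real findMatchingVolume signature):

package main

import "fmt"

// candidate is an illustrative stand-in for a PersistentVolume being matched.
type candidate struct {
	preBound        bool // already bound to the claim we are matching
	affinityMatches bool // outcome of the node-affinity check
}

// match records the affinity result first and acts on it later, mirroring the
// patched loop: a pre-bound volume with bad affinity means no match at all,
// while an unbound candidate with bad affinity is merely skipped.
func match(cands []candidate) *candidate {
	for i := range cands {
		c := &cands[i]
		nodeAffinityValid := c.affinityMatches

		if c.preBound {
			if !nodeAffinityValid {
				return nil // prebound PV (and therefore PVC) unusable on this node
			}
			return c
		}

		if !nodeAffinityValid {
			continue
		}
		return c
	}
	return nil
}

func main() {
	fmt.Println(match([]candidate{{preBound: true}}))               // <nil>: prebound but affinity fails
	fmt.Println(match([]candidate{{affinityMatches: true}}) != nil) // true: unbound volume matches
}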

View File

@@ -1218,7 +1218,7 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
 			pvc.Spec.StorageClassName = &classWait
 			pvc.Name = "claim02"
 		}),
-		node: node2,
+		node: node3,
 	},
 	"success-bad-and-good-node-affinity": {
 		expectedMatch: "affinity-pv3",

View File

@@ -22,7 +22,7 @@ import (
 	"k8s.io/apiserver/pkg/server/mux"
 )
 
-const dashboardPath = "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy"
+const dashboardPath = "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
 
 // UIRedirect redirects /ui to the kube-ui proxy path.
 type UIRedirect struct{}
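The new dashboardPath uses the scheme-qualified form of the service proxy reference, https:kubernetes-dashboard:, because the dashboard now serves only HTTPS on 8443; the e2e test later in this commit builds the same reference with utilnet.JoinSchemeNamePort. A small sketch of how that scheme:name:port segment is assembled, written against plain strings and assumed to mirror the behavior of the k8s.io/apimachinery helper rather than calling it:

package main

import "fmt"

// joinSchemeNamePort assembles the service reference used in proxy URLs.
// Assumed behavior: three segments when a scheme is present, two when only
// a port is present, and the bare name otherwise.
func joinSchemeNamePort(scheme, name, port string) string {
	if scheme != "" {
		return scheme + ":" + name + ":" + port
	}
	if port != "" {
		return name + ":" + port
	}
	return name
}

func main() {
	ref := joinSchemeNamePort("https", "kubernetes-dashboard", "")
	fmt.Println(ref) // https:kubernetes-dashboard:

	// Which yields the proxy path now hard-coded above:
	fmt.Println("/api/v1/namespaces/kube-system/services/" + ref + "/proxy/")
}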

View File

@@ -169,7 +169,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
 		case <-ticker.C:
 			glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
 			if devicePath != "" {
-				devicePaths := getDiskByIdPaths(partition, devicePath)
+				devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
 				path, err := verifyDevicePath(devicePaths)
 				if err != nil {
 					// Log error, if any, and continue checking periodically. See issue #11321

View File

@@ -18,6 +18,8 @@ package aws_ebs
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -178,7 +180,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
 // Returns list of all paths for given EBS mount
 // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
 // Here it is mostly about applying the partition path
-func getDiskByIdPaths(partition string, devicePath string) []string {
+func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
 	devicePaths := []string{}
 	if devicePath != "" {
 		devicePaths = append(devicePaths, devicePath)
@@ -190,6 +192,23 @@ func getDiskByIdPaths(partition string, devicePath string) []string {
 		}
 	}
 
+	// We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"),
+	// and we have to get the volume id from the nvme interface
+	awsVolumeID, err := volumeID.MapToAWSVolumeID()
+	if err != nil {
+		glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
+	} else {
+		// This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
+		// For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
+		nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
+		nvmePath, err := findNvmeVolume(nvmeName)
+		if err != nil {
+			glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
+		} else if nvmePath != "" {
+			devicePaths = append(devicePaths, nvmePath)
+		}
+	}
+
 	return devicePaths
 }
@@ -202,3 +221,35 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error)
 	return awsCloudProvider, nil
 }
+
+// findNvmeVolume looks for the nvme volume with the specified name
+// It follows the symlink (if it exists) and returns the absolute path to the device
+func findNvmeVolume(findName string) (device string, err error) {
+	p := filepath.Join("/dev/disk/by-id/", findName)
+	stat, err := os.Lstat(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			glog.V(6).Infof("nvme path not found %q", p)
+			return "", nil
+		}
+		return "", fmt.Errorf("error getting stat of %q: %v", p, err)
+	}
+
+	if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
+		glog.Warningf("nvme file %q found, but was not a symlink", p)
+		return "", nil
+	}
+
+	// Find the target, resolving to an absolute path
+	// For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
+	resolved, err := filepath.EvalSymlinks(p)
+	if err != nil {
+		return "", fmt.Errorf("error reading target of symlink %q: %v", p, err)
+	}
+
+	if !strings.HasPrefix(resolved, "/dev") {
+		return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved)
+	}
+
+	return resolved, nil
+}
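The helper added above works in two steps: derive the /dev/disk/by-id name that AWS assigns to an NVMe-attached EBS volume (the volume ID with its dash removed, prefixed by nvme-Amazon_Elastic_Block_Store_), then resolve that symlink to the real device node. A standalone sketch of those two steps using only the standard library (the sample volume ID and paths are illustrative):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// nvmeByIDName builds the by-id entry for an EBS volume, e.g.
// vol-0fab1d5e3f72a5e23 -> nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23.
func nvmeByIDName(awsVolumeID string) string {
	return "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(awsVolumeID, "-", "", -1)
}

func main() {
	p := filepath.Join("/dev/disk/by-id", nvmeByIDName("vol-0fab1d5e3f72a5e23"))

	// EvalSymlinks follows the by-id link to the device node (e.g. /dev/nvme2n1);
	// on a machine without that volume attached this simply reports an error.
	if resolved, err := filepath.EvalSymlinks(p); err != nil {
		fmt.Println("device not present:", err)
	} else {
		fmt.Println("device:", resolved)
	}
}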

View File

@@ -1475,12 +1475,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 	failReasons := []algorithm.PredicateFailureReason{}
 	if !boundSatisfied {
-		glog.V(5).Info("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeNodeConflict)
 	}
 
 	if !unboundSatisfied {
-		glog.V(5).Info("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeBindConflict)
 	}
@@ -1489,6 +1489,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 	}
 
 	// All volumes bound or matching PVs found for all unbound PVCs
-	glog.V(5).Info("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+	glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 	return true, nil, nil
 }
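The three one-character changes above fix log calls that passed printf-style arguments to glog.V(5).Info, which, like fmt.Print, does not interpret %v or %q verbs: the verbs would have been printed literally with the values tacked on after them. Infof is the formatting variant. The same distinction can be seen with the standard library logger (used here only because it needs no external dependency):

package main

import "log"

func main() {
	ns, pod, node := "kube-system", "web-0", "node-1"

	// Print-style: the format verbs are not interpreted; they appear verbatim
	// and the arguments are simply concatenated after the string (go vet flags this).
	log.Print("Bound PVs not satisfied for pod %v/%v, node %q", ns, pod, node)

	// Printf-style (what glog's Infof does): the verbs are substituted.
	log.Printf("Bound PVs not satisfied for pod %v/%v, node %q", ns, pod, node)
}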

View File

@@ -15,6 +15,7 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )

View File

@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -36,6 +37,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 		uiServiceName = "kubernetes-dashboard"
 		uiAppName = uiServiceName
 		uiNamespace = metav1.NamespaceSystem
+		uiRedirect = "/ui"
 
 		serverStartTimeout = 1 * time.Minute
 	)
@@ -63,20 +65,20 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 			ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 			defer cancel()
 
-			// Query against the proxy URL for the kube-ui service.
+			// Query against the proxy URL for the kubernetes-dashboard service.
 			err := proxyRequest.Namespace(uiNamespace).
 				Context(ctx).
-				Name(uiServiceName).
+				Name(utilnet.JoinSchemeNamePort("https", uiServiceName, "")).
 				Timeout(framework.SingleCallTimeout).
 				Do().
 				StatusCode(&status).
 				Error()
 			if err != nil {
 				if ctx.Err() != nil {
-					framework.Failf("Request to kube-ui failed: %v", err)
+					framework.Failf("Request to kubernetes-dashboard failed: %v", err)
 					return true, err
 				}
-				framework.Logf("Request to kube-ui failed: %v", err)
+				framework.Logf("Request to kubernetes-dashboard failed: %v", err)
 			} else if status != http.StatusOK {
 				framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
 			}
@@ -88,7 +90,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 		By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
 		var status int
 		err = f.ClientSet.CoreV1().RESTClient().Get().
-			AbsPath("/ui").
+			AbsPath(uiRedirect).
 			Timeout(framework.SingleCallTimeout).
 			Do().
 			StatusCode(&status).