diff --git a/cluster/addons/dashboard/README.md b/cluster/addons/dashboard/README.md
index 9a798ec39ff..b3c2c582fc9 100644
--- a/cluster/addons/dashboard/README.md
+++ b/cluster/addons/dashboard/README.md
@@ -1,5 +1,4 @@
 # Kubernetes Dashboard
-==============
 
 Kubernetes Dashboard is a general purpose, web-based UI for Kubernetes clusters. It
 allows users to manage applications running in the cluster, troubleshoot them,
diff --git a/cluster/addons/dashboard/dashboard-configmap.yaml b/cluster/addons/dashboard/dashboard-configmap.yaml
new file mode 100644
index 00000000000..8aa6ac47db0
--- /dev/null
+++ b/cluster/addons/dashboard/dashboard-configmap.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    # Allows editing resource and makes sure it is created first.
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: kubernetes-dashboard-settings
+  namespace: kube-system
diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml
index 515355b0700..ac05d3a9a29 100644
--- a/cluster/addons/dashboard/dashboard-controller.yaml
+++ b/cluster/addons/dashboard/dashboard-controller.yaml
@@ -1,4 +1,13 @@
-apiVersion: extensions/v1beta1
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: kubernetes-dashboard
+  namespace: kube-system
+---
+apiVersion: apps/v1beta2
 kind: Deployment
 metadata:
   name: kubernetes-dashboard
@@ -20,9 +29,8 @@ spec:
     spec:
       containers:
       - name: kubernetes-dashboard
-        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
+        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
         resources:
-          # keep request = limit to keep this container in guaranteed class
           limits:
             cpu: 100m
             memory: 300Mi
@@ -30,13 +38,29 @@ spec:
             cpu: 100m
             memory: 100Mi
         ports:
-        - containerPort: 9090
+        - containerPort: 8443
+          protocol: TCP
+        args:
+          - --auto-generate-certificates
+        volumeMounts:
+        - name: kubernetes-dashboard-certs
+          mountPath: /certs
+        - name: tmp-volume
+          mountPath: /tmp
         livenessProbe:
           httpGet:
+            scheme: HTTPS
             path: /
-            port: 9090
+            port: 8443
           initialDelaySeconds: 30
           timeoutSeconds: 30
+      volumes:
+      - name: kubernetes-dashboard-certs
+        secret:
+          secretName: kubernetes-dashboard-certs
+      - name: tmp-volume
+        emptyDir: {}
+      serviceAccountName: kubernetes-dashboard
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
diff --git a/cluster/addons/dashboard/dashboard-rbac.yaml b/cluster/addons/dashboard/dashboard-rbac.yaml
new file mode 100644
index 00000000000..658ffd94861
--- /dev/null
+++ b/cluster/addons/dashboard/dashboard-rbac.yaml
@@ -0,0 +1,45 @@
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: kubernetes-dashboard-minimal
+  namespace: kube-system
+rules:
+  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
+- apiGroups: [""]
+  resources: ["secrets"]
+  verbs: ["create"]
+  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+- apiGroups: [""]
+  resources: ["secrets"]
+  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
+  verbs: ["get", "update", "delete"]
+  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+- apiGroups: [""]
+  resources: ["configmaps"]
+  resourceNames: ["kubernetes-dashboard-settings"]
+  verbs: ["get", "update"]
+  # Allow Dashboard to get metrics from heapster.
+- apiGroups: [""]
+  resources: ["services"]
+  resourceNames: ["heapster"]
+  verbs: ["proxy"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: kubernetes-dashboard-minimal
+  namespace: kube-system
+  labels:
+    k8s-app: kubernetes-dashboard
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubernetes-dashboard-minimal
+subjects:
+- kind: ServiceAccount
+  name: kubernetes-dashboard
+  namespace: kube-system
diff --git a/cluster/addons/dashboard/dashboard-secret.yaml b/cluster/addons/dashboard/dashboard-secret.yaml
new file mode 100644
index 00000000000..f26235bec3b
--- /dev/null
+++ b/cluster/addons/dashboard/dashboard-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+    # Allows editing resource and makes sure it is created first.
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: kubernetes-dashboard-certs
+  namespace: kube-system
+type: Opaque
diff --git a/cluster/addons/dashboard/dashboard-service.yaml b/cluster/addons/dashboard/dashboard-service.yaml
index 831248a97d7..ae65ec232b3 100644
--- a/cluster/addons/dashboard/dashboard-service.yaml
+++ b/cluster/addons/dashboard/dashboard-service.yaml
@@ -11,5 +11,5 @@ spec:
   selector:
     k8s-app: kubernetes-dashboard
   ports:
-  - port: 80
-    targetPort: 9090
+  - port: 443
+    targetPort: 8443
diff --git a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml
index de66faecb30..58c233ef027 100644
--- a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml
+++ b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml
@@ -36,7 +36,7 @@ spec:
         hostPath:
           path: /dev
       containers:
-      - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:943a62949cd80c26e7371d4e123dac61b4cc7281390721aaa95f265171094842"
+      - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:5e3837c3ab99e90d4c19053998ad86239591de4264bc177faad75642b64b723d"
        command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
        name: nvidia-gpu-device-plugin
        resources:
diff --git a/cluster/centos/deployAddons.sh b/cluster/centos/deployAddons.sh
index cc96b44248e..cefbc7c250d 100755
--- a/cluster/centos/deployAddons.sh
+++ b/cluster/centos/deployAddons.sh
@@ -45,19 +45,13 @@ function deploy_dns {
 }
 
 function deploy_dashboard {
-  if ${KUBECTL} get rc -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
-    echo "Kubernetes Dashboard replicationController already exists"
-  else
-    echo "Creating Kubernetes Dashboard replicationController"
-    ${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-  fi
+  echo "Deploying Kubernetes Dashboard"
 
-  if ${KUBECTL} get service/kubernetes-dashboard --namespace=kube-system &> /dev/null; then
-    echo "Kubernetes Dashboard service already exists"
-  else
-    echo "Creating Kubernetes Dashboard service"
-    ${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
-  fi
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
+  ${KUBECTL} apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
 
   echo
 }
diff --git a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest
index 844243d428d..4a8891f5b36 100644
--- a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest
+++ b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest
@@ -25,7 +25,7 @@
     "containers": [
       {
         "name": "cluster-autoscaler",
-        "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-alpha1",
+        "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-beta1",
         "livenessProbe": {
           "httpGet": {
             "path": "/health-check",
diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh
index 1528668174c..212265c8a7b 100755
--- a/hack/local-up-cluster.sh
+++ b/hack/local-up-cluster.sh
@@ -792,8 +792,11 @@ function start_kubedashboard {
     if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
         echo "Creating kubernetes-dashboard"
         # use kubectl to create the dashboard
-        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
+        ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
         echo "kubernetes-dashboard deployment and service successfully deployed."
     fi
 }
diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go
index 5f8d0ab36cf..034a26b6b7e 100644
--- a/pkg/cloudprovider/providers/aws/aws.go
+++ b/pkg/cloudprovider/providers/aws/aws.go
@@ -1662,7 +1662,7 @@ type awsDisk struct {
 }
 
 func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
-	awsID, err := name.mapToAWSVolumeID()
+	awsID, err := name.MapToAWSVolumeID()
 	if err != nil {
 		return nil, err
 	}
@@ -2022,7 +2022,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
 // DetachDisk implements Volumes.DetachDisk
 func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-
 	if diskInfo == nil {
 		return "", err
 	}
@@ -2320,7 +2319,6 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
 // DiskIsAttached implements Volumes.DiskIsAttached
 func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
 	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-
 	if diskInfo == nil {
 		return true, err
 	}
@@ -2378,7 +2376,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume
 	idToDiskName := make(map[awsVolumeID]KubernetesVolumeID)
 
 	for _, diskName := range diskNames {
-		volumeID, err := diskName.mapToAWSVolumeID()
+		volumeID, err := diskName.MapToAWSVolumeID()
 		if err != nil {
 			return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
 		}
diff --git a/pkg/cloudprovider/providers/aws/volumes.go b/pkg/cloudprovider/providers/aws/volumes.go
index 3a4aa6284eb..2ac932d43d7 100644
--- a/pkg/cloudprovider/providers/aws/volumes.go
+++ b/pkg/cloudprovider/providers/aws/volumes.go
@@ -59,8 +59,8 @@ type diskInfo struct {
 	disk       *awsDisk
 }
 
-// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
-func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
+// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
+func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
 	// name looks like aws://availability-zone/awsVolumeId
 
 	// The original idea of the URL-style name was to put the AZ into the
@@ -101,7 +101,7 @@ func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
 
 func GetAWSVolumeID(kubeVolumeID string) (string, error) {
 	kid := KubernetesVolumeID(kubeVolumeID)
-	awsID, err := kid.mapToAWSVolumeID()
+	awsID, err := kid.MapToAWSVolumeID()
 	return string(awsID), err
 }
diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go
index dd652471d7a..5c345744564 100644
--- a/pkg/controller/volume/persistentvolume/index.go
+++ b/pkg/controller/volume/persistentvolume/index.go
@@ -169,12 +169,13 @@ func findMatchingVolume(
 			continue
 		}
 
+		nodeAffinityValid := true
 		if node != nil {
 			// Scheduler path, check that the PV NodeAffinity
 			// is satisfied by the node
 			err := volumeutil.CheckNodeAffinity(volume, node.Labels)
 			if err != nil {
-				continue
+				nodeAffinityValid = false
 			}
 		}
 
@@ -185,6 +186,14 @@ func findMatchingVolume(
 			if volumeQty.Cmp(requestedQty) < 0 {
 				continue
 			}
+
+			// If PV node affinity is invalid, return no match.
+			// This means the prebound PV (and therefore PVC)
+			// is not suitable for this node.
+			if !nodeAffinityValid {
+				return nil, nil
+			}
+
 			return volume, nil
 		}
 
@@ -199,6 +208,7 @@ func findMatchingVolume(
 		// - volumes bound to another claim
 		// - volumes whose labels don't match the claim's selector, if specified
 		// - volumes in Class that is not requested
+		// - volumes whose NodeAffinity does not match the node
 		if volume.Spec.ClaimRef != nil {
 			continue
 		} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
@@ -207,6 +217,9 @@ func findMatchingVolume(
 		if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
 			continue
 		}
+		if !nodeAffinityValid {
+			continue
+		}
 
 		if node != nil {
 			// Scheduler path
diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go
index b734a67a991..5f5b50af50f 100644
--- a/pkg/controller/volume/persistentvolume/index_test.go
+++ b/pkg/controller/volume/persistentvolume/index_test.go
@@ -1218,7 +1218,7 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
 				pvc.Spec.StorageClassName = &classWait
 				pvc.Name = "claim02"
 			}),
-			node: node2,
+			node: node3,
 		},
 		"success-bad-and-good-node-affinity": {
 			expectedMatch: "affinity-pv3",
diff --git a/pkg/routes/ui.go b/pkg/routes/ui.go
index 1f079c86e42..de6ca3c3abd 100644
--- a/pkg/routes/ui.go
+++ b/pkg/routes/ui.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/apiserver/pkg/server/mux"
 )
 
-const dashboardPath = "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy"
+const dashboardPath = "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
 
 // UIRedirect redirects /ui to the kube-ui proxy path.
 type UIRedirect struct{}
diff --git a/pkg/volume/aws_ebs/attacher.go b/pkg/volume/aws_ebs/attacher.go
index ce96b94ddde..19ca2ea49b4 100644
--- a/pkg/volume/aws_ebs/attacher.go
+++ b/pkg/volume/aws_ebs/attacher.go
@@ -169,7 +169,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
 		case <-ticker.C:
 			glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
 			if devicePath != "" {
-				devicePaths := getDiskByIdPaths(partition, devicePath)
+				devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
 				path, err := verifyDevicePath(devicePaths)
 				if err != nil {
 					// Log error, if any, and continue checking periodically. See issue #11321
diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/aws_ebs/aws_util.go
index 711a9eaed39..932617d4e9a 100644
--- a/pkg/volume/aws_ebs/aws_util.go
+++ b/pkg/volume/aws_ebs/aws_util.go
@@ -18,6 +18,8 @@ package aws_ebs
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -178,7 +180,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
 // Returns list of all paths for given EBS mount
 // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
 // Here it is mostly about applying the partition path
-func getDiskByIdPaths(partition string, devicePath string) []string {
+func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
 	devicePaths := []string{}
 	if devicePath != "" {
 		devicePaths = append(devicePaths, devicePath)
@@ -190,6 +192,23 @@ func getDiskByIdPaths(partition string, devicePath string) []string {
 		}
 	}
 
+	// We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"),
+	// and we have to get the volume id from the nvme interface
+	awsVolumeID, err := volumeID.MapToAWSVolumeID()
+	if err != nil {
+		glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
+	} else {
+		// This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
+		// For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
+		nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
+		nvmePath, err := findNvmeVolume(nvmeName)
+		if err != nil {
+			glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
+		} else if nvmePath != "" {
+			devicePaths = append(devicePaths, nvmePath)
+		}
+	}
+
 	return devicePaths
 }
 
@@ -202,3 +221,35 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error)
 
 	return awsCloudProvider, nil
 }
+
+// findNvmeVolume looks for the nvme volume with the specified name
+// It follows the symlink (if it exists) and returns the absolute path to the device
+func findNvmeVolume(findName string) (device string, err error) {
+	p := filepath.Join("/dev/disk/by-id/", findName)
+	stat, err := os.Lstat(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			glog.V(6).Infof("nvme path not found %q", p)
+			return "", nil
+		}
+		return "", fmt.Errorf("error getting stat of %q: %v", p, err)
+	}
+
+	if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
+		glog.Warningf("nvme file %q found, but was not a symlink", p)
+		return "", nil
+	}
+
+	// Find the target, resolving to an absolute path
+	// For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
+	resolved, err := filepath.EvalSymlinks(p)
+	if err != nil {
+		return "", fmt.Errorf("error reading target of symlink %q: %v", p, err)
+	}
+
+	if !strings.HasPrefix(resolved, "/dev") {
+		return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved)
+	}
+
+	return resolved, nil
+}
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 917b35acf51..7b463699ea1 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -1475,12 +1475,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 
 	failReasons := []algorithm.PredicateFailureReason{}
 	if !boundSatisfied {
-		glog.V(5).Info("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeNodeConflict)
 	}
 
 	if !unboundSatisfied {
-		glog.V(5).Info("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeBindConflict)
 	}
 
@@ -1489,6 +1489,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 	}
 
 	// All volumes bound or matching PVs found for all unbound PVCs
-	glog.V(5).Info("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+	glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 	return true, nil, nil
 }
diff --git a/test/e2e/ui/BUILD b/test/e2e/ui/BUILD
index 768f6f2bb11..4e6121c6a77 100644
--- a/test/e2e/ui/BUILD
+++ b/test/e2e/ui/BUILD
@@ -15,6 +15,7 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go
index ebf75fe561a..2bc9415f093 100644
--- a/test/e2e/ui/dashboard.go
+++ b/test/e2e/ui/dashboard.go
@@ -23,6 +23,7 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -36,6 +37,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 		uiServiceName = "kubernetes-dashboard"
 		uiAppName     = uiServiceName
 		uiNamespace   = metav1.NamespaceSystem
+		uiRedirect    = "/ui"
 
 		serverStartTimeout = 1 * time.Minute
 	)
@@ -63,20 +65,20 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 			ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 			defer cancel()
 
-			// Query against the proxy URL for the kube-ui service.
+			// Query against the proxy URL for the kubernetes-dashboard service.
 			err := proxyRequest.Namespace(uiNamespace).
 				Context(ctx).
-				Name(uiServiceName).
+				Name(utilnet.JoinSchemeNamePort("https", uiServiceName, "")).
 				Timeout(framework.SingleCallTimeout).
 				Do().
 				StatusCode(&status).
 				Error()
 			if err != nil {
 				if ctx.Err() != nil {
-					framework.Failf("Request to kube-ui failed: %v", err)
+					framework.Failf("Request to kubernetes-dashboard failed: %v", err)
 					return true, err
 				}
-				framework.Logf("Request to kube-ui failed: %v", err)
+				framework.Logf("Request to kubernetes-dashboard failed: %v", err)
 			} else if status != http.StatusOK {
 				framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
 			}
@@ -88,7 +90,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 			By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
 			var status int
 			err = f.ClientSet.CoreV1().RESTClient().Get().
-				AbsPath("/ui").
+				AbsPath(uiRedirect).
 				Timeout(framework.SingleCallTimeout).
 				Do().
 				StatusCode(&status).