

k8s Installation 8: UI Dashboard

8. Dashboard

kubernetes/dashboard

Dashboard is a web-based Kubernetes user interface. You can use Dashboard to deploy containerized applications to a Kubernetes cluster, troubleshoot your containerized application, and manage the cluster resources. You can use Dashboard to get an overview of applications running on your cluster, as well as to create or modify individual Kubernetes resources (such as Deployments, Jobs, DaemonSets, etc.). For example, you can scale a Deployment, initiate a rolling update, restart a pod, or deploy new applications using a wizard.

Official documentation: https://kubernetes.io/zh-cn/docs/tasks/access-application-cluster/web-ui-dashboard/
Source repository: https://github.com/kubernetes/dashboard
Version compatibility: https://github.com/kubernetes/dashboard/releases

cert-manager

https://cert-manager.io/docs/installation/
wget --no-check-certificate https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.yaml -O cert-manager-1.13.1.yaml
wget --no-check-certificate  https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml -O cert-manager-1.13.2.yaml

cat cert-manager-1.13.2.yaml|grep image:|sed -e 's/.*image: //'
"quay.io/jetstack/cert-manager-cainjector:v1.13.2"
"quay.io/jetstack/cert-manager-controller:v1.13.2"
"quay.io/jetstack/cert-manager-webhook:v1.13.2"

docker pull fishchen/cert-manager-controller:v1.13.2
docker pull quay.io/jetstack/cert-manager-webhook:v1.13.2
docker pull quay.io/jetstack/cert-manager-controller:v1.13.2
docker pull quay.nju.edu.cn/jetstack/cert-manager-controller:v1.13.2

quay.dockerproxy.com/
docker pull quay.dockerproxy.com/jetstack/cert-manager-controller:v1.13.1
docker pull quay.dockerproxy.com/jetstack/cert-manager-cainjector:v1.13.1
docker pull quay.dockerproxy.com/jetstack/cert-manager-webhook:v1.13.1

quay.io
docker pull quay.io/jetstack/cert-manager-cainjector:v1.13.1
docker pull quay.io/jetstack/cert-manager-controller:v1.13.1
docker pull quay.io/jetstack/cert-manager-webhook:v1.13.1

quay.nju.edu.cn
docker pull quay.nju.edu.cn/jetstack/cert-manager-cainjector:v1.13.1
docker pull quay.nju.edu.cn/jetstack/cert-manager-controller:v1.13.1
docker pull quay.nju.edu.cn/jetstack/cert-manager-webhook:v1.13.1

docker tag quay.dockerproxy.com/jetstack/cert-manager-cainjector:v1.13.1 repo.k8s.local/quay.io/jetstack/cert-manager-cainjector:v1.13.1
docker tag quay.nju.edu.cn/jetstack/cert-manager-webhook:v1.13.1  repo.k8s.local/quay.io/jetstack/cert-manager-webhook:v1.13.1
docker tag quay.io/jetstack/cert-manager-controller:v1.13.1  repo.k8s.local/quay.io/jetstack/cert-manager-controller:v1.13.1

docker push repo.k8s.local/quay.io/jetstack/cert-manager-cainjector:v1.13.1
docker push repo.k8s.local/quay.io/jetstack/cert-manager-webhook:v1.13.1
docker push repo.k8s.local/quay.io/jetstack/cert-manager-controller:v1.13.1
Import steps omitted; see the Harbor installation post:
docker pull ...
docker tag ...
docker push ...
docker images
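For reference, a minimal sketch of the mirroring loop (assuming docker login against repo.k8s.local has already been done; quay.nju.edu.cn is just one of the mirror options above):

for img in cert-manager-cainjector cert-manager-controller cert-manager-webhook; do
  docker pull quay.nju.edu.cn/jetstack/${img}:v1.13.1
  docker tag quay.nju.edu.cn/jetstack/${img}:v1.13.1 repo.k8s.local/quay.io/jetstack/${img}:v1.13.1
  docker push repo.k8s.local/quay.io/jetstack/${img}:v1.13.1
done
docker images|grep cert-manager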

Prepare the YAML file

cp cert-manager-1.13.1.yaml  cert-manager-1.13.1.org.yaml

sed -n 's/quay\.io/repo.k8s.local\/quay\.io/p'  cert-manager-1.13.1.yaml
sed -i 's/quay\.io/repo.k8s.local\/quay\.io/'  cert-manager-1.13.1.yaml
cat cert-manager-1.13.1.yaml|grep image:|sed -e 's/.*image: //'

kubectl apply -f cert-manager-1.13.1.yaml

customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
configmap/cert-manager created
configmap/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view created
clusterrole.rbac.authorization.k8s.io/cert-manager-view created
clusterrole.rbac.authorization.k8s.io/cert-manager-edit created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created
role.rbac.authorization.k8s.io/cert-manager:leaderelection created
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created

kubectl get pods --namespace cert-manager

NAME                                       READY   STATUS    RESTARTS   AGE
cert-manager-7f46fcb774-gfvjm              1/1     Running   0          14s
cert-manager-cainjector-55f76bd446-nxkrt   1/1     Running   0          14s
cert-manager-webhook-799cbdc68-4t9zw       1/1     Running   0          14s
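Before moving on, it is worth confirming cert-manager is fully ready; the webhook in particular must be serving before any Issuer can be created. A minimal check:

kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=120s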
Prepare the YAML file and list the image addresses

On the control-plane node:
Dashboard 3.0.0-alpha0 requires cert-manager. Installed behind a plain Service or NodePort, the page does not render and /api/v1/login/status returns 404; it must be accessed through an Ingress.

wget  --no-check-certificate  https://raw.githubusercontent.com/kubernetes/dashboard/v3.0.0-alpha0/charts/kubernetes-dashboard.yaml -O kubernetes-dashboard.yaml
cat kubernetes-dashboard.yaml |grep image:|sed -e 's/.*image: //'

docker.io/kubernetesui/dashboard-api:v1.0.0
docker.io/kubernetesui/dashboard-web:v1.0.0
docker.io/kubernetesui/metrics-scraper:v1.0.9
Extract the image names and import them into Harbor:
cat kubernetes-dashboard.yaml |grep image:|awk -F'/' '{print $NF}'
dashboard-api:v1.0.0
dashboard-web:v1.0.0
metrics-scraper:v1.0.9

#Import steps omitted; see the Harbor installation post
docker pull ...
docker tag ...
docker push ...
docker images
After importing into the private Harbor registry, replace docker.io with the private registry address repo.k8s.local.

If pulls are slow (hanging at Pulling fs layer) and you have no private registry, you can use Alibaba's mirror:
registry.aliyuncs.com/google_containers/

cp kubernetes-dashboard.yaml kubernetes-dashboard.org.yaml
sed -n 's/docker\.io\/kubernetesui/repo.k8s.local\/google_containers/p'  kubernetes-dashboard.yaml
sed -i 's/docker\.io\/kubernetesui/repo.k8s.local\/google_containers/'  kubernetes-dashboard.yaml
cat  kubernetes-dashboard.yaml|grep -C2 image:
Install kubernetes-dashboard

kubectl apply -f kubernetes-dashboard.yaml

namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
service/kubernetes-dashboard-web created
service/kubernetes-dashboard-api created
service/kubernetes-dashboard-metrics-scraper created
ingress.networking.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard-api created
deployment.apps/kubernetes-dashboard-web created
deployment.apps/kubernetes-dashboard-metrics-scraper created
error: resource mapping not found for name: "selfsigned" namespace: "kubernetes-dashboard" from "kubernetes-dashboard.yaml": no matches for kind "Issuer" in version "cert-manager.io/v1"
ensure CRDs are installed first
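The Issuer error means the cert-manager CRDs were not yet registered when this manifest was applied. A quick sanity check before re-applying:

kubectl get crd|grep cert-manager.io
kubectl api-resources --api-group=cert-manager.io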
Check the status
kubectl get pod -n kubernetes-dashboard -o wide
NAME                                                    READY   STATUS             RESTARTS   AGE
kubernetes-dashboard-api-6bfd48fcf6-njg9s               0/1     ImagePullBackOff   0          12m
kubernetes-dashboard-metrics-scraper-7d8c76dc88-6rn2w   0/1     ImagePullBackOff   0          12m
kubernetes-dashboard-web-7776cdb89f-jdwqt               0/1     ImagePullBackOff   0          12m
kubectl logs finds nothing without the namespace:
kubectl logs kubernetes-dashboard-api-6bfd48fcf6-njg9s
Error from server (NotFound): pods "kubernetes-dashboard-api-6bfd48fcf6-njg9s" not found
Specify the namespace to inspect the pod:
kubectl describe pods/kubernetes-dashboard-api-6bfd48fcf6-njg9s -n kubernetes-dashboard
Name:             kubernetes-dashboard-api-6bfd48fcf6-njg9s
Namespace:        kubernetes-dashboard
Priority:         0
Service Account:  kubernetes-dashboard
Node:             node01.k8s.local/192.168.244.5
Start Time:       Tue, 17 Oct 2023 13:24:23 +0800
Labels:           app.kubernetes.io/component=api
                  app.kubernetes.io/name=kubernetes-dashboard-api
                  app.kubernetes.io/part-of=kubernetes-dashboard
                  app.kubernetes.io/version=v1.0.0
                  pod-template-hash=6bfd48fcf6

  Normal   Scheduled  39m                    default-scheduler  Successfully assigned kubernetes-dashboard/kubernetes-dashboard-api-6bfd48fcf6-njg9s to node01.k8s.local
  Normal   Pulling    38m (x4 over 39m)      kubelet            Pulling image "repo.k8s.local/kubernetesui/dashboard-api:v1.0.0"
  Warning  Failed     38m (x4 over 39m)      kubelet            Failed to pull image "repo.k8s.local/kubernetesui/dashboard-api:v1.0.0": failed to pull and unpack image "repo.k8s.local/kubernetesui/dashboard-api:v1.0.0": failed to resolve reference "repo.k8s.local/kubernetesui/dashboard-api:v1.0.0": unexpected status from HEAD request to https://repo.k8s.local/v2/kubernetesui/dashboard-api/manifests/v1.0.0: 401 Unauthorized
  Warning  Failed     38m (x4 over 39m)      kubelet            Error: ErrImagePull
  Warning  Failed     37m (x6 over 39m)      kubelet            Error: ImagePullBackOff
  Normal   BackOff    4m29s (x150 over 39m)  kubelet            Back-off pulling image "repo.k8s.local/kubernetesui/dashboard-api:v1.0.0"
Fix and reinstall

repo.k8s.local/kubernetesui/dashboard-api:v1.0.0 should be repo.k8s.local/google_containers/dashboard-api:v1.0.0
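One way to correct the prefix in place and verify (a sketch):

sed -i 's#repo.k8s.local/kubernetesui#repo.k8s.local/google_containers#' kubernetes-dashboard.yaml
cat kubernetes-dashboard.yaml|grep image: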

kubectl delete -f kubernetes-dashboard.yaml
kubectl apply -f kubernetes-dashboard.yaml

Now running normally

kubectl get pod -n kubernetes-dashboard  -o wide
NAME                                                    READY   STATUS    RESTARTS   AGE
kubernetes-dashboard-api-5fcfcfd7b-nlrnh                1/1     Running   0          15s
kubernetes-dashboard-metrics-scraper-585685f868-f7g5j   1/1     Running   0          15s
kubernetes-dashboard-web-57bd66fd9f-hbc62               1/1     Running   0          15s

kubectl describe pods/kubernetes-dashboard-api-5fcfcfd7b-nlrnh -n kubernetes-dashboard

Check the Service's exposed ports; we will use them for access:
kubectl get svc -n kubernetes-dashboard
NAME                                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes-dashboard-api               ClusterIP   10.96.175.209   <none>        9000/TCP   14s
kubernetes-dashboard-metrics-scraper   ClusterIP   10.96.69.44     <none>        8000/TCP   14s
kubernetes-dashboard-web               ClusterIP   10.96.49.99     <none>        8000/TCP   14s
Test via the ClusterIP first
curl http://10.96.175.209:9000/api/v1/login/status
{
 "tokenPresent": false,
 "headerPresent": false,
 "httpsMode": true,
 "impersonationPresent": false,
 "impersonatedUser": ""
}
curl http://10.96.49.99:8000/
<!--
Copyright 2017 The Kubernetes Authors.
Create a kubernetes-dashboard admin role

The default kubernetes-dashboard account has too few privileges.

cat > dashboard-svc-account.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1      #note: this line differs from older guides
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
EOF

kubectl apply -f dashboard-svc-account.yaml 
serviceaccount/dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
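To confirm the account and binding exist:

kubectl get serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl describe clusterrolebinding dashboard-admin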
Kubernetes ServiceAccount has no token?

kubectl get secret -n kubernetes-dashboard|grep admin|awk '{print $1}'
The old method of reading the token from a Secret no longer applies.

Earlier versions automatically generated a Secret when a ServiceAccount was created, viewable with kubectl get secret -n kube-system; now one more step is needed:
Since Kubernetes 1.24, token Secrets are no longer auto-generated for ServiceAccounts by default; a token has to be generated explicitly, and tokens created this way are temporary.

kubectl create token dashboard-admin --namespace kubernetes-dashboard
eyJhbGciOiJSUzI1NiIsImtpZCI6Ik9EWUpmSzcyLUdzRlJnQWNhdHpOYWhNX0E4RDZ6Zl9id0JMcXZyMng5bkUifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzAwNTM4ODQzLCJpYXQiOjE3MDA1MzUyNDMsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkYXNoYm9hcmQtYWRtaW4iLCJ1aWQiOiI3ZmUwYjFiZi05ZDhlLTRjOGItYWEzMy0xZWU3ZDU2YjE2NjUifX0sIm5iZiI6MTcwMDUzNTI0Mywic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.cUGX77qAdY7Mqo3tPbWgcCLD2zmRoNUSlFG1EHlCRBwiA7ffL1PGbOHazE6eTmLrRo5if6nm9ILAK1Mv4Co2woOEW8qIJBVXClpZomvkj7BC2bGd-0X5W1s87CnEX7RnKcBqFVcP6zJY_ycLy1o9X9g4Y1wtMm8mptBgos5xmVAb8HecTgOWHt80W736K3WSB9ovuoAGVZe7-ahQ7DX8WJ_qYqbEE5v9laqYBIddcoJtfAYf8U8yaW-MQsJq46xp_sxU164WDozw_sSe4PIxHHqaG4tulJy3J2fn6D_0xbC8fupX3l8FPLcPQm1rWMFGPjsLhU8i_0ihnvyEmvsA6w

#Default account
kubectl create token kubernetes-dashboard --namespace kubernetes-dashboard
eyJhbGciOiJSUzI1NiIsImtpZCI6Ik9EWUpmSzcyLUdzRlJnQWNhdHpOYWhNX0E4RDZ6Zl9id0JMcXZyMng5bkUifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzAwNTM5MzgwLCJpYXQiOjE3MDA1MzU3ODAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInVpZCI6ImU2NWUzODRhLTI5ZDYtNGYwYy04OGI0LWJlZWVkYmRhODMxNiJ9fSwibmJmIjoxNzAwNTM1NzgwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQifQ.rpE8aVSVWGcydSJy_QcCg6LjdxPvE2M45AspWqC-u406HSznOby1cTvpa9c7scQ7KooyrjSdlzW-1JVd4U6aFSt8sKQLmLXSTUoGi7ACkI105wTGUU4WQmB5CaPynPC68hhrNPTrEXvM4fichCDykp2hWaVCKOwSQPU-cMsCrIeg-Jqeikckdbpfr7m5XDW8_ydb-_X49hwDVqJeA8eJ5Qn-qlkts8Lj3m3rWjVTKlVeMJARR6LCbZUFZ3uwmOFyUzIX0UDKUHGktt5-k33LbLMMpvKKRzhwfu9o5WSTQdvFux1EpVskYxtjpsyKW_PEwcz6UzxvaLwToxV4uDq5_w

# You can add the --duration flag to set the lifetime; run kubectl create token -h for details
kubectl create token dashboard-admin --namespace kubernetes-dashboard --duration 10h
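If you want a non-expiring token like the old behaviour, the token Secret can still be created by hand (a sketch; the Secret name dashboard-admin-token is my own choice):

cat > dashboard-admin-token.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: dashboard-admin
type: kubernetes.io/service-account-token
EOF
kubectl apply -f dashboard-admin-token.yaml
#read the generated token back
kubectl get secret dashboard-admin-token -n kubernetes-dashboard -o jsonpath='{.data.token}'|base64 -d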
How to expose the Dashboard

Deploy an Ingress.
Proxying the Service with kube proxy: the API cannot be reached and the page renders blank.
A NodePort Service: the API likewise cannot be reached and the page renders blank.

The page only displays correctly through an Ingress.
Edit the Ingress and remove the localhost host entry:
kubectl edit Ingress kubernetes-dashboard -n kubernetes-dashboard

    #- host: localhost
    - http:

Access it via the Ingress port:

curl http://127.0.0.1:30180/

curl http://127.0.0.1:30180/api/v1/login/status
{
 "tokenPresent": false,
 "headerPresent": false,
 "httpsMode": true,
 "impersonationPresent": false,
 "impersonatedUser": ""
}

kubectl delete Ingress kubernetes-dashboard -n kubernetes-dashboard
kubectl apply -f kubernetes-dashboard.yaml

Extending the Kubernetes Dashboard session lifetime

The default token lifetime is 900 seconds, i.e. 15 minutes, which means you have to re-authenticate every 15 minutes.
Change it to 12 hours: --token-ttl=43200
kubectl edit deployment kubernetes-dashboard-api -n kubernetes-dashboard

          args:
            - --enable-insecure-login
            - --namespace=kubernetes-dashboard
            - --token-ttl=43200
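The same flag can be added non-interactively with a JSON patch (a sketch, assuming --token-ttl is not already present in the args list):

kubectl -n kubernetes-dashboard patch deployment kubernetes-dashboard-api --type=json -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--token-ttl=43200"}]'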
kubectl get pod -n kubernetes-dashboard  -o wide
NAME                                                    READY   STATUS    RESTARTS   AGE   IP             NODE               NOMINATED NODE   READINESS GATES
kubernetes-dashboard-api-55cf847b6b-7sctx               1/1     Running   0          20h   10.244.2.251   node02.k8s.local   <none>           <none>
kubernetes-dashboard-metrics-scraper-585685f868-hqgpc   1/1     Running   0          40h   10.244.1.254   node01.k8s.local   <none>           <none>
kubernetes-dashboard-web-57bd66fd9f-pghct               1/1     Running   0          40h   10.244.1.253   node01.k8s.local   <none>           <none>

kubectl delete pod kubernetes-dashboard-api-55cf847b6b-7sctx -n kubernetes-dashboard

Using a domain name

Add the domain to the local hosts file:
dashboard.k8s.local
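For example (the IP is illustrative; use the address of the node running your ingress controller):

echo '192.168.244.5 dashboard.k8s.local' >> /etc/hosts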

kubectl edit Ingress kubernetes-dashboard -n kubernetes-dashboard

  rules:
    #- host: 127.0.0.1 #Invalid value: "127.0.0.1": must be a DNS name, not an IP address
    #- host: localhost
    - host: dashboard.k8s.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard-web
                port:
                  #number: 8000
                  name: web

curl -k -H "Host:dashboard.k8s.local" http://10.96.49.99:8000/

Persist the same host rule in kubernetes-dashboard.yaml, then re-apply:

    - host: dashboard.k8s.local
      http:

kubectl apply -f kubernetes-dashboard.yaml
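With the hosts entry and Ingress rule in place, the Dashboard should answer on the domain through the ingress NodePort used earlier (30180 in this setup):

curl http://dashboard.k8s.local:30180/api/v1/login/status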

Installing metrics-server

metrics-server collects node and pod CPU/memory usage; the data lives in the container itself and is not persisted. It backs kubectl top, scheduler decisions, HPA autoscaling, and the native Dashboard metrics display.
metrics-server has nothing whatsoever to do with Prometheus; there is no data or interface dependency between them.

A typical error when the Metrics API is missing:
Error scraping node metrics: the server could not find the requested resource (get nodes.metrics.k8s.io)

https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/metrics-server

wget --no-check-certificate   https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.3/components.yaml -O metrics-server-0.6.3.yaml

In the Deployment, add the --kubelet-insecure-tls flag to spec.template.spec.containers[].args so that the kubelets' serving certificates are not verified.
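For reference, the resulting args block looks roughly like this (the surrounding flags come from the stock v0.6.3 manifest and may differ):

      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls   #added: do not verify kubelet certificates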

cat metrics-server-0.6.3.yaml|grep image:
image: registry.k8s.io/metrics-server/metrics-server:v0.6.3

docker pull registry.aliyuncs.com/google_containers/metrics-server:v0.6.3
docker tag registry.aliyuncs.com/google_containers/metrics-server:v0.6.3  repo.k8s.local/registry.k8s.io/metrics-server/metrics-server:v0.6.3
docker push repo.k8s.local/registry.k8s.io/metrics-server/metrics-server:v0.6.3

sed -n "/image:/{s/image: /image: repo.k8s.local\//p}" metrics-server-0.6.3.yaml
sed -i "/image:/{s/image: /image: repo.k8s.local\//}" metrics-server-0.6.3.yaml

kubectl top nodes   #fails with 'error: Metrics API not available' until metrics-server is running

kubectl apply -f metrics-server-0.6.3.yaml

kubectl get pods -n=kube-system |grep metrics
metrics-server-8fc7dd595-n2s6b               1/1     Running   6 (9d ago)      16d

kubectl api-versions|grep metrics
metrics.k8s.io/v1beta1

#top readings run higher than what the Dashboard shows
kubectl top pods
kubectl top nodes
NAME                 CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
master01.k8s.local   145m         3%     1490Mi          38%       
node01.k8s.local     54m          2%     1770Mi          46%       
node02.k8s.local     63m          3%     2477Mi          64%       

kubectl -n kube-system describe pod metrics-server-8fc7dd595-n2s6b 
kubectl logs metrics-server-8fc7dd595-n2s6b -n kube-system

kubectl describe  pod kube-apiserver-master01.k8s.local -n kube-system
  Type     Reason     Age                   From     Message
  ----     ------     ----                  ----     -------
  Warning  Unhealthy  35m (x321 over 10d)   kubelet  Liveness probe failed: HTTP probe failed with statuscode: 500
  Warning  Unhealthy  34m (x1378 over 10d)  kubelet  Readiness probe failed: HTTP probe failed with statuscode: 500
error: Metrics API not available
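When top fails like this, check whether the Metrics API has registered and become available:

kubectl get apiservice v1beta1.metrics.k8s.io
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes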

Re-apply the YAML:
kubectl top pods
kubectl apply -f metrics-server-0.6.3.yaml

kubectl -n kube-system describe pod metrics-server-8fc7dd595-lz5kz
