
Installing a Kubernetes Cluster from Binary Files


All operations are performed as the root user. For high availability an odd number of masters (at least 3) is generally recommended; here we use 3 masters.

Lab environment notes (reference: GitHub):

master: kube-apiserver,kube-controller-manager,kube-scheduler,flanneld

node: kubelet,kube-proxy,flannel

Service_CIDR: 10.254.0.0/16 — the Service network. Not routable before deployment; after deployment it is reachable inside the cluster via IP:Port.

Cluster_CIDR: 172.30.0.0/16 — the Pod network. Not routable before deployment; routable after deployment (guaranteed by flanneld).

Hostname    IP address       Deployed software                       Role
k8s-m12     192.168.10.12    keepalived + haproxy + etcd + master    master
k8s-m13     192.168.10.13    keepalived + haproxy + etcd + master    master
k8s-m14     192.168.10.14    keepalived + haproxy + etcd + master    master
k8s-n15     192.168.10.15    node + docker                           worker
k8s-n16     192.168.10.16    node + docker                           worker
VIP         192.168.10.100                                           VIP

2.1 Download the installation packages

Kubernetes releases on GitHub: https://github.com/kubernetes/kubernetes/releases

Download kubernetes-server-linux-amd64.tar.gz from the Server Binaries section.

Download kubernetes-node-linux-amd64.tar.gz from the Node Binaries section.

Download kubernetes-client-linux-amd64.tar.gz from the Client Binaries section.

See the reference on the various CA certificate types.

Component download URLs for each Kubernetes version (a download loop sketch follows the list):

  1. https://github.com/kubernetes/kubernetes/tree/v1.14.3
  2. #kubernetes
  3. wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-node-linux-amd64.tar.gz
  4. wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-client-linux-amd64.tar.gz
  5. wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-server-linux-amd64.tar.gz
  6. wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes.tar.gz
  7. #etcd
  8. wget https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
  9. #flannel
  10. wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
  11. #cni-plugins
  12. wget https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz
  13. #docker
  14. wget https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz
  15. #cfssl
  16. wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  17. wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  18. wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  19. #heapster
  20. wget https://github.com/kubernetes-retired/heapster/archive/v1.5.4.tar.gz
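To fetch everything in one pass, a minimal bash sketch over the URLs listed above (the /opt/pkg download directory is an assumption):

mkdir -p /opt/pkg && cd /opt/pkg
for url in \
  https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-server-linux-amd64.tar.gz \
  https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-node-linux-amd64.tar.gz \
  https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-client-linux-amd64.tar.gz \
  https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz \
  https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz \
  https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz \
  https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz
do
  wget -c "$url"   # -c resumes interrupted downloads
done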

2.2 Environment preparation

  1. #1.12机器上生成密钥,无密码ssh登陆
  2. ssh-keygen -t rsa
  3. ssh-copy-id 192.168.10.13 #依次拷贝到其他节点上
  4. #2.Disable the firewall (run the following on all machines)
  5. systemctl stop firewalld
  6. systemctl disable firewalld
  7. #3.关闭swap分区
  8. swapoff -a
  9. sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
  10. #4.关闭SELinux
  11. sestatus #查看selinux状态
  12. setenforce 0 #临时关闭selinux
  13. sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
  14. #5.升级内核参考:https://www.cnblogs.com/fan-gx/p/11006762.html
  15. #6.修改文件句柄数
  16. cat <<EOF >>/etc/security/limits.conf
  17. * soft nofile 65536
  18. * hard nofile 65536
  19. * soft nproc 65536
  20. * hard nproc 65536
  21. * soft memlock unlimited
  22. * hard memlock unlimited
  23. EOF
  24. #7.安装ipvs
  25. yum install ipvsadm ipset sysstat conntrack libseccomp -y
  26. #开机加载内核模块,并设置开机自动加载
  27. cat > /etc/sysconfig/modules/ipvs.modules <<EOF
  28. #!/bin/bash
  29. modprobe -- ip_vs
  30. modprobe -- ip_vs_rr
  31. modprobe -- ip_vs_wrr
  32. modprobe -- ip_vs_sh
  33. modprobe -- nf_conntrack_ipv4
  34. EOF
  35. #然后执行脚本
  36. chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
  37. lsmod | grep -e ip_vs -e nf_conntrack_ipv4
  38. #8.修改系统参数
  39. cat <<EOF > /etc/sysctl.d/k8s.conf
  40. net.bridge.bridge-nf-call-ip6tables = 1
  41. net.bridge.bridge-nf-call-iptables = 1
  42. net.ipv4.ip_nonlocal_bind = 1
  43. net.ipv4.ip_forward = 1
  44. vm.swappiness=0
  45. EOF
  46. sysctl --system
  47. #-----------下面参考别人的---------#
  48. # cat <<EOF > /etc/sysctl.d/k8s.conf
  49. net.ipv4.tcp_keepalive_time = 600
  50. net.ipv4.tcp_keepalive_intvl = 30
  51. net.ipv4.tcp_keepalive_probes = 10
  52. net.ipv6.conf.all.disable_ipv6 = 1
  53. net.ipv6.conf.default.disable_ipv6 = 1
  54. net.ipv6.conf.lo.disable_ipv6 = 1
  55. net.ipv4.neigh.default.gc_stale_time = 120
  56. net.ipv4.conf.all.rp_filter = 0
  57. net.ipv4.conf.default.rp_filter = 0
  58. net.ipv4.conf.default.arp_announce = 2
  59. net.ipv4.conf.lo.arp_announce = 2
  60. net.ipv4.conf.all.arp_announce = 2
  61. net.ipv4.ip_forward = 1
  62. net.ipv4.tcp_max_tw_buckets = 5000
  63. net.ipv4.tcp_syncookies = 1
  64. net.ipv4.tcp_max_syn_backlog = 1024
  65. net.ipv4.tcp_synack_retries = 2
  66. net.bridge.bridge-nf-call-ip6tables = 1
  67. net.bridge.bridge-nf-call-iptables = 1
  68. net.netfilter.nf_conntrack_max = 2310720
  69. fs.inotify.max_user_watches=89100
  70. fs.may_detach_mounts = 1
  71. fs.file-max = 52706963
  72. fs.nr_open = 52706963
  73. net.bridge.bridge-nf-call-arptables = 1
  74. vm.swappiness = 0
  75. vm.overcommit_memory=1
  76. vm.panic_on_oom=0
  77. EOF
  78. #9.In production it is recommended to reserve memory so the host stays reachable over ssh when memory runs out (reserve 2G on a 32G machine, 3G on a 251G machine, 5G on a 500G machine); the line below reserves 5G
  79. echo 'vm.min_free_kbytes=5000000' >> /etc/sysctl.conf
  80. sysctl -p
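Every step in this section has to be repeated on each machine. A hedged convenience sketch that pushes the sysctl, limits and ipvs-module settings from the .12 machine to the rest (IP list assumed from the host table above; note it overwrites the target limits.conf, and the yum installs still need to run on each node):

for ip in 192.168.10.13 192.168.10.14 192.168.10.15 192.168.10.16; do
  ssh $ip "mkdir -p /etc/sysconfig/modules"
  scp /etc/security/limits.conf           $ip:/etc/security/limits.conf
  scp /etc/sysctl.d/k8s.conf              $ip:/etc/sysctl.d/k8s.conf
  scp /etc/sysconfig/modules/ipvs.modules $ip:/etc/sysconfig/modules/
  ssh $ip "chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && sysctl --system"
done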

2.3 Deploying Docker

For a binary deployment of Docker see: https://www.kubernetes.org.cn/3831.html. For convenience, Docker is installed with yum here on all nodes.

  1. #1.安装yum源工具包
  2. yum install -y yum-utils device-mapper-persistent-data lvm2
  3. #2.下载docker-ce官方的yum源配置文件
  4. # yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
  5. # yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  6. #3.Disable the docker-ce-edge repo; edge is the development channel and is unstable, install the stable channel instead
  7. yum-config-manager --disable docker-ce-edge
  8. #4.更新本地YUM源缓存
  9. yum makecache fast
  10. #5.安装Docker-ce相应版本
  11. yum -y install docker-ce
  12. #6.Configure the daemon. The kubelet's cgroup driver must match Docker's, and the following is the officially recommended configuration (newer kubelet binaries also take a cgroup-driver setting)
  13. #Because pulling images is slow in China, an Aliyun registry mirror is appended at the end of the config.
  14. mkdir -p /etc/docker &&
  15. cat > /etc/docker/daemon.json <<EOF
  16. {
  17. "exec-opts": ["native.cgroupdriver=systemd"],
  18. "log-driver": "json-file",
  19. "log-opts": {
  20. "max-size": "100m"
  21. },
  22. "storage-driver": "overlay2",
  23. "storage-opts": [
  24. "overlay2.override_kernel_check=true"
  25. ],
  26. "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
  27. }
  28. EOF
  29. #7.设置开机自启动
  30. systemctl restart docker && systemctl enable docker && systemctl status docker
  31. #8.Optionally download the packages on another machine first; this environment uses version 18.09
  32. yum install --downloadonly docker-ce-18.09 --downloaddir=/opt #download the docker-ce packages with yum
  33. yum localinstall /opt/*.rpm -y #then install the downloaded packages
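A quick sanity check that Docker picked up the systemd cgroup driver configured in daemon.json (the kubelet started later must use the same driver):

docker info 2>/dev/null | grep -i 'cgroup driver'   # expected: Cgroup Driver: systemd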

2.4 Deploying etcd

etcd is the key/value store that holds the entire cluster state; it is commonly used for service discovery, shared configuration, and concurrency control (leader election, distributed locks, and so on). Kubernetes stores all of its runtime data in etcd.

All Kubernetes components talk to etcd through the API server to save or read resource state. If resources allow, etcd can run on a few dedicated machines; in that case the apiserver must be configured to point at that etcd cluster.

2.4.1 Creating the etcd certificates

If you prefer not to install the cfssl tools on the deployment hosts, this step can be done on any other machine; after generation, simply copy the certificates to the etcd hosts. etcd can also be deployed without certificates, in which case the etcd.service and etcd.conf files must not contain any https URLs.

  1. wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
  2. wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
  3. wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
  4. chmod +x /usr/local/bin/cfssl*
  5. #配置CA文件
  6. mkdir /root/ssl && cd /root/ssl
  7. cat > ca-config.json <<EOF
  8. {
  9. "signing": {
  10. "default": {
  11. "expiry": "8760h"
  12. },
  13. "profiles": {
  14. "kubernetes": {
  15. "usages": [
  16. "signing",
  17. "key encipherment",
  18. "server auth",
  19. "client auth"
  20. ],
  21. "expiry": "8760h"
  22. }
  23. }
  24. }
  25. }
  26. EOF
  27. #2----------------------------------------------
  28. cat > ca-csr.json <<EOF
  29. {
  30. "CN": "kubernetes",
  31. "key": {
  32. "algo": "rsa",
  33. "size": 2048
  34. },
  35. "names": [
  36. {
  37. "C": "CN",
  38. "ST": "ShangHai",
  39. "L": "ShangHai",
  40. "O": "k8s",
  41. "OU": "System"
  42. }
  43. ]
  44. }
  45. EOF
  46. #3--------------------------------------------------
  47. cat > etcd-csr.json <<EOF
  48. {
  49. "CN": "etcd",
  50. "hosts": [
  51. "127.0.0.1",
  52. "192.168.10.12",
  53. "192.168.10.13",
  54. "192.168.10.14"
  55. ],
  56. "key": {
  57. "algo": "rsa",
  58. "size": 2048
  59. },
  60. "names": [
  61. {
  62. "C": "CN",
  63. "ST": "ShangHai",
  64. "L": "ShangHai",
  65. "O": "k8s",
  66. "OU": "System"
  67. }
  68. ]
  69. }
  70. EOF
  71. # hosts字段的IP地址是指授权使用证书的etcd地址
  72. #------------------------------------
  73. cfssl gencert -initca ca-csr.json | cfssljson -bare ca
  74. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
  75. #After generation there are 9 files in total:
  76. ca-config.json
  77. ca.csr
  78. ca-csr.json
  79. ca-key.pem
  80. ca.pem
  81. etcd.csr
  82. etcd-csr.json
  83. etcd-key.pem
  84. etcd.pem
  85. #将生成好的etcd.pem和etcd-key.pem以及ca.pem三个文件拷贝到etcd机器上
  86. mkdir -p /etc/kubernetes/ssl && cp *.pem /etc/kubernetes/ssl/
  87. ssh -n 192.168.10.13 "mkdir -p /etc/kubernetes/ssl && exit"
  88. ssh -n 192.168.10.14 "mkdir -p /etc/kubernetes/ssl && exit"
  89. scp -r /etc/kubernetes/ssl/*.pem 192.168.10.13:/etc/kubernetes/ssl/
  90. scp -r /etc/kubernetes/ssl/*.pem 192.168.10.14:/etc/kubernetes/ssl/
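Before distributing the certificates it is worth double-checking the SANs and expiry, for example with the cfssl-certinfo tool downloaded earlier (openssl works just as well):

cfssl-certinfo -cert /etc/kubernetes/ssl/etcd.pem    # shows subject, SAN hosts and not_after
openssl x509 -in /etc/kubernetes/ssl/etcd.pem -noout -dates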
2.4.2 Deploying etcd

Upload the downloaded etcd binaries to the etcd nodes.

  1. #在etcd的机器上安装etcd程序
  2. mkdir -p /var/lib/etcd
  3. tar -zxvf etcd-v3.3.13-linux-amd64.tar.gz
  4. cp etcd-v3.3.13-linux-amd64/etcd* /usr/local/bin
  5. scp etcd-v3.3.13-linux-amd64/etcd* 192.168.10.13:/usr/local/bin
  6. scp etcd-v3.3.13-linux-amd64/etcd* 192.168.10.14:/usr/local/bin
  7. #1.在12机器上创建etcd.service文件
  8. cat <<EOF >/etc/systemd/system/etcd.service
  9. [Unit]
  10. Description=Etcd Server
  11. After=network.target
  12. After=network-online.target
  13. Wants=network-online.target
  14. Documentation=https://github.com/coreos
  15. [Service]
  16. Type=notify
  17. WorkingDirectory=/var/lib/etcd/
  18. ExecStart=/usr/local/bin/etcd \
  19. --name k8s-m12 \
  20. --cert-file=/etc/kubernetes/ssl/etcd.pem \
  21. --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  22. --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  23. --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  24. --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  25. --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  26. --initial-advertise-peer-urls https://192.168.10.12:2380 \
  27. --listen-peer-urls https://192.168.10.12:2380 \
  28. --listen-client-urls https://192.168.10.12:2379,http://127.0.0.1:2379 \
  29. --advertise-client-urls https://192.168.10.12:2379 \
  30. --initial-cluster-token etcd-cluster-0 \
  31. --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  32. --initial-cluster-state new \
  33. --data-dir=/var/lib/etcd
  34. Restart=on-failure
  35. RestartSec=5
  36. LimitNOFILE=65536
  37. [Install]
  38. WantedBy=multi-user.target
  39. EOF
  40. #2.启动etcd服务
  41. systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd
  1. #1.在13机器上创建etcd.service文件
  2. cat <<EOF >/etc/systemd/system/etcd.service
  3. [Unit]
  4. Description=Etcd Server
  5. After=network.target
  6. After=network-online.target
  7. Wants=network-online.target
  8. Documentation=https://github.com/coreos
  9. [Service]
  10. Type=notify
  11. WorkingDirectory=/var/lib/etcd/
  12. ExecStart=/usr/local/bin/etcd \
  13. --name k8s-m13 \
  14. --cert-file=/etc/kubernetes/ssl/etcd.pem \
  15. --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  16. --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  17. --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  18. --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  19. --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  20. --initial-advertise-peer-urls https://192.168.10.13:2380 \
  21. --listen-peer-urls https://192.168.10.13:2380 \
  22. --listen-client-urls https://192.168.10.13:2379,http://127.0.0.1:2379 \
  23. --advertise-client-urls https://192.168.10.13:2379 \
  24. --initial-cluster-token etcd-cluster-0 \
  25. --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  26. --initial-cluster-state new \
  27. --data-dir=/var/lib/etcd
  28. Restart=on-failure
  29. RestartSec=5
  30. LimitNOFILE=65536
  31. [Install]
  32. WantedBy=multi-user.target
  33. EOF
  34. #2.启动etcd服务
  35. systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd
  1. #1.在14机器上创建etcd.service文件
  2. cat <<EOF >/etc/systemd/system/etcd.service
  3. [Unit]
  4. Description=Etcd Server
  5. After=network.target
  6. After=network-online.target
  7. Wants=network-online.target
  8. Documentation=https://github.com/coreos
  9. [Service]
  10. Type=notify
  11. WorkingDirectory=/var/lib/etcd/
  12. ExecStart=/usr/local/bin/etcd \
  13. --name k8s-m14 \
  14. --cert-file=/etc/kubernetes/ssl/etcd.pem \
  15. --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  16. --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  17. --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  18. --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  19. --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  20. --initial-advertise-peer-urls https://192.168.10.14:2380 \
  21. --listen-peer-urls https://192.168.10.14:2380 \
  22. --listen-client-urls https://192.168.10.14:2379,http://127.0.0.1:2379 \
  23. --advertise-client-urls https://192.168.10.14:2379 \
  24. --initial-cluster-token etcd-cluster-0 \
  25. --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  26. --initial-cluster-state new \
  27. --data-dir=/var/lib/etcd
  28. Restart=on-failure
  29. RestartSec=5
  30. LimitNOFILE=65536
  31. [Install]
  32. WantedBy=multi-user.target
  33. EOF
  34. #2.启动etcd服务
  35. systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd
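The three unit files above differ only in the node name and node IP. A hedged sketch that generates and ships them from the .12 machine in one loop (the host/IP mapping is assumed from the table at the top):

declare -A NODES=( [k8s-m12]=192.168.10.12 [k8s-m13]=192.168.10.13 [k8s-m14]=192.168.10.14 )
CLUSTER="k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380"
for name in "${!NODES[@]}"; do
  ip=${NODES[$name]}
  # render the unit for this member; $name/$ip expand, \\ leaves a literal backslash for systemd line continuation
  cat > /tmp/etcd-$name.service <<EOF
[Unit]
Description=Etcd Server
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd --name $name \\
  --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls https://$ip:2380 --listen-peer-urls https://$ip:2380 \\
  --listen-client-urls https://$ip:2379,http://127.0.0.1:2379 --advertise-client-urls https://$ip:2379 \\
  --initial-cluster-token etcd-cluster-0 --initial-cluster $CLUSTER \\
  --initial-cluster-state new --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
  ssh $ip "mkdir -p /var/lib/etcd"
  scp /tmp/etcd-$name.service $ip:/etc/systemd/system/etcd.service
done
# then enable and start etcd on all three members at roughly the same time, as shown above,
# so the initial cluster can form (a lone member with Type=notify will wait for quorum)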
2.4.3 Verifying the cluster
  1. #1.查看集群状态
  2. etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
  3. #返回如下,代表集群正常
  4. member 1af68d968c7e3f22 is healthy: got healthy result from https://192.168.10.12:2379
  5. member 55204c19ed228077 is healthy: got healthy result from https://192.168.10.14:2379
  6. member e8d9a97b17f26476 is healthy: got healthy result from https://192.168.10.13:2379
  7. cluster is healthy
  8. #2.查看集群成员
  9. etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem member list
  10. #返回如下结果
  11. 1af68d968c7e3f22: name=k8s-m12 peerURLs=https://192.168.10.12:2380 clientURLs=https://192.168.10.12:2379 isLeader=false
  12. 55204c19ed228077: name=k8s-m14 peerURLs=https://192.168.10.14:2380 clientURLs=https://192.168.10.14:2379 isLeader=false
  13. e8d9a97b17f26476: name=k8s-m13 peerURLs=https://192.168.10.13:2380 clientURLs=https://192.168.10.13:2379 isLeader=true
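The commands above use the etcdctl v2 API, which is the default in etcd 3.3. If you prefer the v3 API, an equivalent health check looks roughly like this:

export ETCDCTL_API=3
etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem \
  endpoint health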

2.5 Deploying flannel

flannel must be installed on every node. Its main purpose is to let Docker containers on different hosts communicate with each other; it is the network foundation of the Kubernetes cluster.

2.5.1 Creating the flannel certificate
  1. #1.Generate the TLS certificate, used as a client certificate when talking to etcd (it only needs to be generated once)
  2. cd /root/ssl
  3. cat > flanneld-csr.json <<EOF
  4. {
  5. "CN": "flanneld",
  6. "hosts": [],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "ST": "ShangHai",
  15. "L": "ShangHai",
  16. "O": "k8s",
  17. "OU": "System"
  18. }
  19. ]
  20. }
  21. EOF
  22. #2.生成证书和私钥
  23. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
  24. #包含以下文件
  25. flanneld.csr
  26. flanneld-csr.json
  27. flanneld-key.pem
  28. flanneld.pem
  29. #3.然后将证书拷贝到所有节点下。
  30. cp flanneld*.pem /etc/kubernetes/ssl
  31. scp flanneld*.pem 192.168.10.13:/etc/kubernetes/ssl
  32. scp flanneld*.pem 192.168.10.14:/etc/kubernetes/ssl
  33. scp flanneld*.pem 192.168.10.15:/etc/kubernetes/ssl
  34. scp flanneld*.pem 192.168.10.16:/etc/kubernetes/ssl
2.5.2 Deploying flannel
  1. #1.开始安装flannel
  2. tar -zvxf flannel-v0.11.0-linux-amd64.tar.gz
  3. cp flanneld mk-docker-opts.sh /usr/local/bin
  4. scp flanneld mk-docker-opts.sh 192.168.10.13:/usr/local/bin
  5. scp flanneld mk-docker-opts.sh 192.168.10.14:/usr/local/bin
  6. scp flanneld mk-docker-opts.sh 192.168.10.15:/usr/local/bin
  7. scp flanneld mk-docker-opts.sh 192.168.10.16:/usr/local/bin
  8. #2.向etcd写入集群Pod网段信息,在etcd集群中任意一台执行一次即可
  9. etcdctl \
  10. --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  11. --ca-file=/etc/kubernetes/ssl/ca.pem \
  12. --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  13. --key-file=/etc/kubernetes/ssl/flanneld-key.pem \
  14. mk /kubernetes/network/config '{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
  15. #----得到返回信息如下,设置的网络是172.30.0.0/16,子网掩码是24位
  16. {"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}
  17. #2.1.列出键值存储的目录
  18. etcdctl \
  19. --ca-file=/etc/kubernetes/ssl/ca.pem \
  20. --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  21. --key-file=/etc/kubernetes/ssl/flanneld-key.pem ls -r
  22. #2.2.查看键值存储
  23. etcdctl \
  24. --ca-file=/etc/kubernetes/ssl/ca.pem \
  25. --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  26. --key-file=/etc/kubernetes/ssl/flanneld-key.pem get /kubernetes/network/config
  27. #2.3查看已分配pod的子网列表
  28. etcdctl \
  29. --ca-file=/etc/kubernetes/ssl/ca.pem \
  30. --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  31. --key-file=/etc/kubernetes/ssl/flanneld-key.pem ls /kubernetes/network/subnets
  32. #3、创建flannel.service文件
  33. cat > /etc/systemd/system/flannel.service << EOF
  34. [Unit]
  35. Description=Flanneld overlay address etcd agent
  36. After=network.target
  37. After=network-online.target
  38. Wants=network-online.target
  39. After=etcd.service
  40. Before=docker.service
  41. [Service]
  42. Type=notify
  43. ExecStart=/usr/local/bin/flanneld \
  44. -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  45. -etcd-certfile=/etc/kubernetes/ssl/flanneld.pem \
  46. -etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem \
  47. -etcd-endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  48. -etcd-prefix=/kubernetes/network
  49. ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
  50. Restart=on-failure
  51. [Install]
  52. WantedBy=multi-user.target
  53. RequiredBy=docker.service
  54. EOF
  55. #mk-docker-opts.sh 脚本将分配给flanneld的Pod子网网段信息写入到/run/flannel/docker文件中,后续docker启动时使用这个文件中参数值设置docker0网桥。
  56. #flanneld 使用系统缺省路由所在的接口和其它节点通信,对于有多个网络接口的机器(如,内网和公网),可以用 -iface=enpxx 选项值指定通信接口。
  57. #4、启动flannel
  58. systemctl daemon-reload && systemctl enable flannel && systemctl start flannel && systemctl status flannel
  59. #5.验证flannel
  60. cat /run/flannel/docker #/run/flannel/docker是flannel分配给docker的子网信息,
  61. #显示如下
  62. DOCKER_OPT_BIP="--bip=172.30.7.1/24"
  63. DOCKER_OPT_IPMASQ="--ip-masq=true"
  64. DOCKER_OPT_MTU="--mtu=1450"
  65. DOCKER_NETWORK_OPTIONS=" --bip=172.30.7.1/24 --ip-masq=true --mtu=1450"
  66. cat /run/flannel/subnet.env #/run/flannel/subnet.env包含了flannel整个大网段以及在此节点上的子网段
  67. #显示如下
  68. FLANNEL_NETWORK=172.30.0.0/16
  69. FLANNEL_SUBNET=172.30.7.1/24
  70. FLANNEL_MTU=1450
  71. FLANNEL_IPMASQ=false
  72. ip add | grep flannel #查看网卡信息
  73. 4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
  74. inet 172.30.7.0/32 scope global flannel.1
2.5.3 Configuring Docker to use flannel
  1. #1.Configure Docker to use the flannel network by adding the following under the [Service] section
  2. vim /etc/systemd/system/multi-user.target.wants/docker.service
  3. EnvironmentFile=/run/flannel/docker #new line; also append the variable below to the ExecStart line
  4. ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock $DOCKER_NETWORK_OPTIONS
  5. #2.重启docker,然后可以查看到已分配pod的子网列表
  6. systemctl daemon-reload && systemctl restart docker && systemctl status docker
  7. ip add | grep docker
  8. #docker0网口IP地址,已改变
  9. 4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
  10. inet 172.30.7.1/24 brd 172.30.7.255 scope global docker0
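After Docker has been restarted on every node, a quick hedged check that containers on different hosts can reach each other over the flannel network (busybox is just an example image; the IP shown is illustrative):

# on 192.168.10.15: start a test container and note its IP
docker run -d --name pingtest busybox sleep 3600
docker inspect -f '{{.NetworkSettings.IPAddress}}' pingtest    # e.g. 172.30.34.2
# on 192.168.10.16: ping that IP from another container
docker run --rm busybox ping -c 3 172.30.34.2
# clean up on .15 afterwards
docker rm -f pingtest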
2.5.4 Setting up the CNI plugin for flannel
  1. mkdir -p /opt/cni && tar -zxvf cni-plugins-linux-amd64-v0.8.1.tgz -C /opt/cni
  2. mkdir -p /etc/cni/net.d
  3. cat > /etc/cni/net.d/10-default.conf <<EOF
  4. {
  5. "name": "flannel",
  6. "type": "flannel",
  7. "delegate": {
  8. "bridge": "docker0",
  9. "isDefaultGateway": true,
  10. "mtu": 1400
  11. }
  12. }
  13. EOF
  14. #Copy the plugin binaries and the config file to all other hosts (a loop version follows below)
  15. scp /opt/cni/* 192.168.10.13:/usr/local/bin && scp /etc/cni/net.d/* 192.168.10.13:/etc/cni/net.d/
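The scp above only covers one host; a hedged loop that pushes the CNI plugin binaries and configuration to every remaining machine (target paths follow the kubelet flags used later: --cni-bin-dir=/usr/local/bin, --cni-conf-dir=/etc/cni/net.d):

for ip in 192.168.10.13 192.168.10.14 192.168.10.15 192.168.10.16; do
  ssh $ip "mkdir -p /etc/cni/net.d"
  scp /opt/cni/* $ip:/usr/local/bin/
  scp /etc/cni/net.d/10-default.conf $ip:/etc/cni/net.d/
done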

2.6 Deploying keepalived + haproxy

keepalived provides the VIP through which kube-apiserver is exposed; haproxy listens on the VIP and load-balances across all kube-apiserver instances, with health checking.

This document reuses the three master machines. The port haproxy listens on (8443) must be different from kube-apiserver's port 6443 to avoid a conflict.

keepalived periodically checks the state of the local haproxy process; if haproxy is detected to be unhealthy, a new master is elected and the VIP floats to the newly elected node, keeping the VIP highly available. All components (kubectl, apiserver, controller-manager, scheduler, and so on) reach kube-apiserver through the VIP on haproxy's port 8443.

2.6.1 Installing haproxy
  1. yum install -y haproxy
  2. #12机器上配置
  3. cat << EOF > /etc/haproxy/haproxy.cfg
  4. global
  5. log 127.0.0.1 local2
  6. chroot /var/lib/haproxy
  7. pidfile /var/run/haproxy.pid
  8. maxconn 4000
  9. user haproxy
  10. group haproxy
  11. daemon
  12. defaults
  13. mode tcp
  14. log global
  15. retries 3
  16. timeout connect 10s
  17. timeout client 1m
  18. timeout server 1m
  19. listen admin_stats
  20. bind 0.0.0.0:9090
  21. mode http
  22. log 127.0.0.1 local0 err
  23. stats refresh 30s
  24. stats uri /status
  25. stats realm welcome login\ Haproxy
  26. stats auth admin:123456
  27. stats hide-version
  28. stats admin if TRUE
  29. frontend kubernetes
  30. bind *:8443
  31. mode tcp
  32. default_backend kubernetes-master
  33. backend kubernetes-master
  34. balance roundrobin
  35. server k8s-m12 192.168.10.12:6443 check maxconn 2000
  36. server k8s-m13 192.168.10.13:6443 check maxconn 2000
  37. server k8s-m14 192.168.10.14:6443 check maxconn 2000
  38. EOF
  39. #13 和 14机器上配置都一样
  40. # 启动haproxy
  41. systemctl enable haproxy && systemctl start haproxy && systemctl status haproxy
2.6.2 Installing keepalived
  1. yum install -y keepalived
  2. #10.12机器上配置
  3. cat <<EOF > /etc/keepalived/keepalived.conf
  4. global_defs {
  5. router_id LVS_k8s
  6. }
  7. vrrp_script CheckK8sMaster {
  8. script "curl -k https://192.168.10.100:8443"
  9. interval 3
  10. timeout 9
  11. fall 2
  12. rise 2
  13. }
  14. vrrp_instance VI_1 {
  15. state MASTER
  16. interface ens33
  17. virtual_router_id 100
  18. priority 100
  19. advert_int 1
  20. mcast_src_ip 192.168.10.12
  21. nopreempt
  22. authentication {
  23. auth_type PASS
  24. auth_pass fana123
  25. }
  26. unicast_peer {
  27. 192.168.10.13
  28. 192.168.10.14
  29. }
  30. virtual_ipaddress {
  31. 192.168.10.100/24
  32. }
  33. track_script {
  34. CheckK8sMaster
  35. }
  36. }
  37. EOF
  38. #13机器keepalived配置
  39. cat <<EOF > /etc/keepalived/keepalived.conf
  40. global_defs {
  41. router_id LVS_k8s
  42. }
  43. vrrp_script CheckK8sMaster {
  44. script "curl -k https://192.168.10.100:8443"
  45. interval 3
  46. timeout 9
  47. fall 2
  48. rise 2
  49. }
  50. vrrp_instance VI_1 {
  51. state BACKUP
  52. interface ens33
  53. virtual_router_id 100
  54. priority 90
  55. advert_int 1
  56. mcast_src_ip 192.168.10.13
  57. nopreempt
  58. authentication {
  59. auth_type PASS
  60. auth_pass fana123
  61. }
  62. unicast_peer {
  63. 192.168.10.12
  64. 192.168.10.14
  65. }
  66. virtual_ipaddress {
  67. 192.168.10.100/24
  68. }
  69. track_script {
  70. CheckK8sMaster
  71. }
  72. }
  73. EOF
  74. #14机器上keepalived配置
  75. cat <<EOF > /etc/keepalived/keepalived.conf
  76. global_defs {
  77. router_id LVS_k8s
  78. }
  79. vrrp_script CheckK8sMaster {
  80. script "curl -k https://192.168.10.100:8443"
  81. interval 3
  82. timeout 9
  83. fall 2
  84. rise 2
  85. }
  86. vrrp_instance VI_1 {
  87. state BACKUP
  88. interface ens33
  89. virtual_router_id 100
  90. priority 80
  91. advert_int 1
  92. mcast_src_ip 192.168.10.14
  93. nopreempt
  94. authentication {
  95. auth_type PASS
  96. auth_pass fana123
  97. }
  98. unicast_peer {
  99. 192.168.10.12
  100. 192.168.10.13
  101. }
  102. virtual_ipaddress {
  103. 192.168.10.100/24
  104. }
  105. track_script {
  106. CheckK8sMaster
  107. }
  108. }
  109. EOF
  110. #启动keepalived
  111. systemctl restart keepalived && systemctl enable keepalived && systemctl status keepalived
  112. #查看vip
  113. ip add | grep 10.100
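A hedged way to confirm that failover actually works: stop haproxy on whichever node currently holds 192.168.10.100 and watch the VIP move to one of the backups.

ip addr show ens33 | grep 192.168.10.100    # find the node that currently holds the VIP
systemctl stop haproxy                      # on that node; the vrrp_script health check starts failing and the VIP is released
# on the other two masters, within a few seconds:
ip addr show ens33 | grep 192.168.10.100    # the VIP should now appear on one of them
systemctl start haproxy                     # restore the stopped instance afterwards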

2.7 Deploying the master components

The functions of kube-scheduler, kube-controller-manager and kube-apiserver are closely related. In addition, only one instance each of kube-scheduler and kube-controller-manager can be active at a time; when several run, a leader must be chosen by election.

2.7.1 Deploying the kubectl command-line tool

kubectl is the command-line management tool for the Kubernetes cluster. By default it reads the kube-apiserver address, certificate and user name from ~/.kube/config; without this configuration, kubectl commands may fail. ~/.kube/config only needs to be created once and can then be copied to the other masters.

  1. #1.解压命令
  2. tar -zxvf kubernetes-server-linux-amd64.tar.gz
  3. cd kubernetes/server/bin/
  4. cp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler /usr/local/bin
  5. scp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler 192.168.10.13:/usr/local/bin
  6. scp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler 192.168.10.14:/usr/local/bin
  7. #2.创建CA证书
  8. cd /root/ssl
  9. cat > admin-csr.json <<EOF
  10. {
  11. "CN": "admin",
  12. "hosts": [],
  13. "key": {
  14. "algo": "rsa",
  15. "size": 2048
  16. },
  17. "names": [
  18. {
  19. "C": "CN",
  20. "ST": "ShangHai",
  21. "L": "ShangHai",
  22. "O": "system:masters",
  23. "OU": "System"
  24. }
  25. ]
  26. }
  27. EOF
  28. #3.生成证书和私钥
  29. cfssl gencert -ca=ca.pem \
  30. -ca-key=ca-key.pem \
  31. -config=ca-config.json \
  32. -profile=kubernetes admin-csr.json | cfssljson -bare admin
  33. #4.创建~/.kube/config文件
  34. kubectl config set-cluster kubernetes \
  35. --certificate-authority=ca.pem \
  36. --embed-certs=true \
  37. --server=https://192.168.10.100:8443 \
  38. --kubeconfig=kubectl.kubeconfig
  39. #4.1.设置客户端认证参数
  40. kubectl config set-credentials admin \
  41. --client-certificate=admin.pem \
  42. --client-key=admin-key.pem \
  43. --embed-certs=true \
  44. --kubeconfig=kubectl.kubeconfig
  45. #4.2.设置上下文参数
  46. kubectl config set-context kubernetes \
  47. --cluster=kubernetes \
  48. --user=admin \
  49. --kubeconfig=kubectl.kubeconfig
  50. #4.3.设置默认上下文
  51. kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
  52. #4.4.Copy the kubectl.kubeconfig file (create the .kube directory first if it does not exist)
  53. mkdir -p ~/.kube && cp kubectl.kubeconfig ~/.kube/config
  54. ssh 192.168.10.13 "mkdir -p /root/.kube" && scp kubectl.kubeconfig 192.168.10.13:/root/.kube/config
  55. ssh 192.168.10.14 "mkdir -p /root/.kube" && scp kubectl.kubeconfig 192.168.10.14:/root/.kube/config
  56. cp admin*.pem /etc/kubernetes/ssl/
  57. scp admin*.pem 192.168.10.13:/etc/kubernetes/ssl/
  58. scp admin*.pem 192.168.10.14:/etc/kubernetes/ssl/
2.7.2 Deploying kube-apiserver
  1. #1.创建CA证书,hosts字段指定授权使用该证书的IP或域名列表,这里列出了VIP/apiserver节点IP/kubernetes服务IP和域名
  2. cd /root/ssl
  3. cat > kubernetes-csr.json <<EOF
  4. {
  5. "CN": "kubernetes",
  6. "hosts": [
  7. "127.0.0.1",
  8. "192.168.10.12",
  9. "192.168.10.13",
  10. "192.168.10.14",
  11. "192.168.10.100",
  12. "10.254.0.1",
  13. "kubernetes",
  14. "kubernetes.default",
  15. "kubernetes.default.svc",
  16. "kubernetes.default.svc.cluster",
  17. "kubernetes.default.svc.cluster.local"
  18. ],
  19. "key": {
  20. "algo": "rsa",
  21. "size": 2048
  22. },
  23. "names": [
  24. {
  25. "C": "CN",
  26. "ST": "ShangHai",
  27. "L": "ShangHai",
  28. "O": "k8s",
  29. "OU": "System"
  30. }
  31. ]
  32. }
  33. EOF
  34. #2.生成证书和私钥
  35. cfssl gencert -ca=ca.pem \
  36. -ca-key=ca-key.pem \
  37. -config=ca-config.json \
  38. -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
  39. #3.将证书拷贝到其他master节点
  40. cp kubernetes*.pem /etc/kubernetes/ssl/
  41. scp kubernetes*.pem 192.168.10.13:/etc/kubernetes/ssl/
  42. scp kubernetes*.pem 192.168.10.14:/etc/kubernetes/ssl/
  43. #4.创建加密配置文件
  44. cat > encryption-config.yaml <<EOF
  45. kind: EncryptionConfig
  46. apiVersion: v1
  47. resources:
  48. - resources:
  49.     - secrets
  50.   providers:
  51.     - aescbc:
  52.         keys:
  53.           - name: key1
  54.             secret: $(head -c 32 /dev/urandom | base64)
  55.     - identity: {}
  56. EOF
  57. #4.1创建kube-apiserver使用的客户端令牌文件
  58. cat <<EOF > bootstrap-token.csv
  59. $(head -c 32 /dev/urandom | base64),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  60. EOF
  61. #5.将加密文件拷贝到其他master节点
  62. cp encryption-config.yaml bootstrap-token.csv /etc/kubernetes/ssl
  63. scp encryption-config.yaml bootstrap-token.csv 192.168.10.13:/etc/kubernetes/ssl
  64. scp encryption-config.yaml bootstrap-token.csv 192.168.10.14:/etc/kubernetes/ssl
  65. #6.创建kube-apiserver.service文件
  66. cat > /etc/systemd/system/kube-apiserver.service << EOF
  67. [Unit]
  68. Description=Kubernetes API Server
  69. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  70. After=network.target
  71. [Service]
  72. ExecStart=/usr/local/bin/kube-apiserver \
  73. --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  74. --anonymous-auth=false \
  75. --experimental-encryption-provider-config=/etc/kubernetes/ssl/encryption-config.yaml \
  76. --advertise-address=0.0.0.0 \
  77. --bind-address=0.0.0.0 \
  78. --insecure-bind-address=127.0.0.1 \
  79. --secure-port=6443 \
  80. --insecure-port=0 \
  81. --authorization-mode=Node,RBAC \
  82. --runtime-config=api/all \
  83. --enable-bootstrap-token-auth \
  84. --service-cluster-ip-range=10.254.0.0/16 \
  85. --service-node-port-range=30000-32700 \
  86. --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  87. --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  88. --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  89. --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
  90. --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
  91. --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  92. --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  93. --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  94. --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  95. --etcd-servers=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  96. --enable-swagger-ui=true \
  97. --allow-privileged=true \
  98. --apiserver-count=3 \
  99. --audit-log-maxage=30 \
  100. --audit-log-maxbackup=3 \
  101. --audit-log-maxsize=100 \
  102. --audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log \
  103. --event-ttl=1h \
  104. --alsologtostderr=true \
  105. --logtostderr=false \
  106. --log-dir=/var/log/kubernetes \
  107. --v=2
  108. Restart=on-failure
  109. RestartSec=5
  110. Type=notify
  111. LimitNOFILE=65536
  112. [Install]
  113. WantedBy=multi-user.target
  114. EOF
  115. mkdir -p /var/log/kubernetes #创建日志目录然后拷贝到其他master
  116. scp /etc/systemd/system/kube-apiserver.service 192.168.10.13:/etc/systemd/system/
  117. scp /etc/systemd/system/kube-apiserver.service 192.168.10.14:/etc/systemd/system/
  118. #7.启动服务
  119. systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver && systemctl status kube-apiserver
  120. #8.授予kubernetes证书访问kubelet api权限。在执行kubectl exec、run、logs 等命令时,apiserver会转发到kubelet。这里定义 RBAC规则,授权apiserver调用kubelet API。
  121. kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
  122. #8.1预定义的ClusterRole system:kubelet-api-admin授予访问kubelet所有 API 的权限:
  123. kubectl describe clusterrole system:kubelet-api-admin
  124. #9.检查api-server和集群状态
  125. netstat -ptln | grep kube-apiserver
  126. tcp 0 0 192.168.10.12:6443 0.0.0.0:* LISTEN 13000/kube-apiserve
  127. kubectl cluster-info
  128. #显示如下
  129. Kubernetes master is running at https://192.168.10.100:8443
  130. To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
  131. kubectl get all --all-namespaces
  132. #显示如下
  133. NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  134. default service/kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 12m
  135. kubectl get componentstatuses
  136. #显示如下,因scheduler和controller-manager还没有部署
  137. NAME STATUS MESSAGE ERROR
  138. scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
  139. controller-manager Unhealthy Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
  140. etcd-2 Healthy {"health":"true"}
  141. etcd-1 Healthy {"health":"true"}
  142. etcd-0 Healthy {"health":"true"}
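Besides kubectl, the secure port can be probed directly through the VIP with the admin client certificate generated earlier; a hedged example:

curl -s --cacert /etc/kubernetes/ssl/ca.pem \
  --cert /etc/kubernetes/ssl/admin.pem --key /etc/kubernetes/ssl/admin-key.pem \
  https://192.168.10.100:8443/version
# a JSON blob containing "major": "1", "minor": "14" shows haproxy -> kube-apiserver works end to end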
2.7.3 Deploying kube-controller-manager

This control-plane component runs on all 3 nodes; after startup one instance is elected leader and the others block. If the leader becomes unavailable, the remaining instances hold a new election, which keeps the service available.

  1. #1.创建CA证书
  2. cd /root/ssl
  3. cat > kube-controller-manager-csr.json << EOF
  4. {
  5. "CN": "system:kube-controller-manager",
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "hosts": [
  11. "127.0.0.1",
  12. "192.168.10.12",
  13. "192.168.10.13",
  14. "192.168.10.14"
  15. ],
  16. "names": [
  17. {
  18. "C": "CN",
  19. "ST": "ShangHai",
  20. "L": "ShangHai",
  21. "O": "system:kube-controller-manager",
  22. "OU": "System"
  23. }
  24. ]
  25. }
  26. EOF
  27. #2.生成证书
  28. cfssl gencert -ca=ca.pem \
  29. -ca-key=ca-key.pem \
  30. -config=ca-config.json \
  31. -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
  32. #3.将证书拷贝到其他master节点
  33. cp kube-controller-manager*.pem /etc/kubernetes/ssl/
  34. scp kube-controller-manager*.pem 192.168.10.13:/etc/kubernetes/ssl/
  35. scp kube-controller-manager*.pem 192.168.10.14:/etc/kubernetes/ssl/
  36. #4.创建kubeconfig文件
  37. kubectl config set-cluster kubernetes \
  38. --certificate-authority=ca.pem \
  39. --embed-certs=true \
  40. --server=https://192.168.10.100:8443 \
  41. --kubeconfig=kube-controller-manager.kubeconfig
  42. kubectl config set-credentials system:kube-controller-manager \
  43. --client-certificate=kube-controller-manager.pem \
  44. --client-key=kube-controller-manager-key.pem \
  45. --embed-certs=true \
  46. --kubeconfig=kube-controller-manager.kubeconfig
  47. kubectl config set-context system:kube-controller-manager \
  48. --cluster=kubernetes \
  49. --user=system:kube-controller-manager \
  50. --kubeconfig=kube-controller-manager.kubeconfig
  51. kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
  52. #5.拷贝kube-controller-manager.kubeconfig到其他master节点
  53. cp kube-controller-manager.kubeconfig /etc/kubernetes/ssl/
  54. scp kube-controller-manager.kubeconfig 192.168.10.13:/etc/kubernetes/ssl/
  55. scp kube-controller-manager.kubeconfig 192.168.10.14:/etc/kubernetes/ssl/
  56. #6.创建kube-controller-manager.service文件
  57. cat > /etc/systemd/system/kube-controller-manager.service << EOF
  58. [Unit]
  59. Description=Kubernetes Controller Manager
  60. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  61. [Service]
  62. ExecStart=/usr/local/bin/kube-controller-manager \
  63. --address=127.0.0.1 \
  64. --master=https://192.168.10.100:8443 \
  65. --kubeconfig=/etc/kubernetes/ssl/kube-controller-manager.kubeconfig \
  66. --allocate-node-cidrs=true \
  67. --authentication-kubeconfig=/etc/kubernetes/ssl/kube-controller-manager.kubeconfig \
  68. --service-cluster-ip-range=10.254.0.0/16 \
  69. --cluster-cidr=172.30.0.0/16 \
  70. --cluster-name=kubernetes \
  71. --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  72. --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  73. --experimental-cluster-signing-duration=8760h \
  74. --leader-elect=true \
  75. --feature-gates=RotateKubeletServerCertificate=true \
  76. --controllers=*,bootstrapsigner,tokencleaner \
  77. --horizontal-pod-autoscaler-use-rest-clients=true \
  78. --horizontal-pod-autoscaler-sync-period=10s \
  79. --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  80. --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  81. --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  82. --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  83. --use-service-account-credentials=true \
  84. --alsologtostderr=true \
  85. --logtostderr=false \
  86. --log-dir=/var/log/kubernetes \
  87. --v=2
  89. Restart=on-failure
  90. RestartSec=5
  91. [Install]
  92. WantedBy=multi-user.target
  93. EOF
  94. #7.拷贝到其他master节点,然后启动服务
  95. scp /etc/systemd/system/kube-controller-manager.service 192.168.10.13:/etc/systemd/system/
  96. scp /etc/systemd/system/kube-controller-manager.service 192.168.10.14:/etc/systemd/system/
  97. systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager && systemctl status kube-controller-manager
  98. #8.检查服务
  99. netstat -lnpt|grep kube-controll
  100. tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 14492/kube-controll
  101. tcp6 0 0 :::10257 :::* LISTEN 14492/kube-controll
  102. kubectl get cs
  103. #显示如下
  104. NAME STATUS MESSAGE ERROR
  105. scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
  106. controller-manager Healthy ok
  107. etcd-1 Healthy {"health":"true"}
  108. etcd-2 Healthy {"health":"true"}
  109. etcd-0 Healthy {"health":"true"}
  110. #检查leader所在机器
  111. kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
  112. #显示如下,k8s-m12选为leader
  113. apiVersion: v1
  114. kind: Endpoints
  115. metadata:
  116. annotations:
  117. control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-m12_6f9b09e6-995b-11e9-b2bf-000c29959a05","leaseDurationSeconds":15,"acquireTime":"2019-06-28T04:16:00Z","renewTime":"2019-06-28T04:21:32Z","leaderTransitions":0}'
  118. creationTimestamp: "2019-06-28T04:16:00Z"
  119. name: kube-controller-manager
  120. namespace: kube-system
  121. resourceVersion: "1481"
  122. selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  123. uid: 6f9d838f-995b-11e9-9cb7-000c29959a05

See also: controller permissions and the use-service-account-credentials parameter
See also: kubelet authentication and authorization

2.7.4 Deploying kube-scheduler

This component also runs on all 3 nodes; after startup one instance is elected leader and the others block. If the leader becomes unavailable, the remaining instances elect a new leader, which keeps the service available.

  1. #1.创建CA证书
  2. cd /root/ssl
  3. cat > kube-scheduler-csr.json << EOF
  4. {
  5. "CN": "system:kube-scheduler",
  6. "hosts": [
  7. "127.0.0.1",
  8. "192.168.10.12",
  9. "192.168.10.13",
  10. "192.168.10.14"
  11. ],
  12. "key": {
  13. "algo": "rsa",
  14. "size": 2048
  15. },
  16. "names": [
  17. {
  18. "C": "CN",
  19. "ST": "ShangHai",
  20. "L": "ShangHai",
  21. "O": "system:kube-scheduler",
  22. "OU": "System"
  23. }
  24. ]
  25. }
  26. EOF
  27. #2.生成证书
  28. cfssl gencert -ca=ca.pem \
  29. -ca-key=ca-key.pem \
  30. -config=ca-config.json \
  31. -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
  32. #3.创建kube-scheduler.kubeconfig文件
  33. kubectl config set-cluster kubernetes \
  34. --certificate-authority=ca.pem \
  35. --embed-certs=true \
  36. --server=https://192.168.10.100:8443 \
  37. --kubeconfig=kube-scheduler.kubeconfig
  38. kubectl config set-credentials system:kube-scheduler \
  39. --client-certificate=kube-scheduler.pem \
  40. --client-key=kube-scheduler-key.pem \
  41. --embed-certs=true \
  42. --kubeconfig=kube-scheduler.kubeconfig
  43. kubectl config set-context system:kube-scheduler \
  44. --cluster=kubernetes \
  45. --user=system:kube-scheduler \
  46. --kubeconfig=kube-scheduler.kubeconfig
  47. kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
  48. #4.拷贝kubeconfig到其他master节点
  49. cp kube-scheduler.kubeconfig kube-scheduler*.pem /etc/kubernetes/ssl/
  50. scp kube-scheduler.kubeconfig kube-scheduler*.pem 192.168.10.13:/etc/kubernetes/ssl/
  51. scp kube-scheduler.kubeconfig kube-scheduler*.pem 192.168.10.14:/etc/kubernetes/ssl/
  52. #5.创建kube-scheduler.service文件
  53. cat > /etc/systemd/system/kube-scheduler.service << EOF
  54. [Unit]
  55. Description=Kubernetes Scheduler
  56. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  57. [Service]
  58. ExecStart=/usr/local/bin/kube-scheduler \
  59. --address=127.0.0.1 \
  60. --master=https://192.168.10.100:8443 \
  61. --kubeconfig=/etc/kubernetes/ssl/kube-scheduler.kubeconfig \
  62. --leader-elect=true \
  63. --alsologtostderr=true \
  64. --logtostderr=false \
  65. --log-dir=/var/log/kubernetes \
  66. --v=2
  67. Restart=on-failure
  68. RestartSec=5
  69. [Install]
  70. WantedBy=multi-user.target
  71. EOF
  72. #6.将kube-scheduler.service拷贝到其他master节点,然后启动服务
  73. scp /etc/systemd/system/kube-scheduler.service 192.168.10.13:/etc/systemd/system
  74. scp /etc/systemd/system/kube-scheduler.service 192.168.10.14:/etc/systemd/system
  75. systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler
  76. #7.检查服务
  77. netstat -lnpt|grep kube-sche
  78. tcp 0 0 127.0.0.1:10251 0.0.0.0:* LISTEN 15137/kube-schedule
  79. tcp6 0 0 :::10259 :::* LISTEN 15137/kube-schedule
  80. kubectl get cs
  81. #显示如下
  82. NAME STATUS MESSAGE ERROR
  83. scheduler Healthy ok
  84. controller-manager Healthy ok
  85. etcd-2 Healthy {"health":"true"}
  86. etcd-1 Healthy {"health":"true"}
  87. etcd-0 Healthy {"health":"true"}
  88. kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
  89. #显示如下,k8s-m12选为leader
  90. apiVersion: v1
  91. kind: Endpoints
  92. metadata:
  93. annotations:
  94. control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-m12_1c3f7882-995f-11e9-a5c1-000c29959a05","leaseDurationSeconds":15,"acquireTime":"2019-06-28T04:42:19Z","renewTime":"2019-06-28T04:45:18Z","leaderTransitions":0}'
  95. creationTimestamp: "2019-06-28T04:42:19Z"
  96. name: kube-scheduler
  97. namespace: kube-system
  98. resourceVersion: "2714"
  99. selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  100. uid: 1cda2b3a-995f-11e9-ac7d-000c2928fce6
2.7.5 Check that everything is working on all master nodes
  1. kubectl get componentstatuses
  2. NAME STATUS MESSAGE ERROR
  3. controller-manager Healthy ok
  4. scheduler Healthy ok
  5. etcd-2 Healthy {"health":"true"}
  6. etcd-1 Healthy {"health":"true"}
  7. etcd-0 Healthy {"health":"true"}
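The component statuses above are derived from the local healthz endpoints of kube-scheduler (port 10251) and kube-controller-manager (port 10252), as seen in the earlier netstat output; they can also be probed directly on each master:

for port in 10251 10252; do
  echo -n "port $port: " && curl -s http://127.0.0.1:$port/healthz && echo
done
# expected: "ok" for both ports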

2.8 Deploying the worker nodes

The worker nodes run kubelet, kube-proxy, docker and flannel.

2.8.1 Deploying kubelet

kubelet runs on every worker node. It receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run and logs. On startup kubelet automatically registers the node with kube-apiserver, and its built-in cAdvisor collects and reports the node's resource usage.

  1. #1.解压包,拷贝命令
  2. tar -zxvf kubernetes-node-linux-amd64.tar.gz
  3. cd /opt/kubernetes/node/bin
  4. cp kubectl kubelet kube-proxy /usr/local/bin
  5. scp kubectl kubelet kube-proxy 192.168.10.16:/usr/local/bin
  6. #2.Create the kubelet-bootstrap.kubeconfig file (also on the .12 machine). This has to be done 3 times, once for each of k8s-m12, k8s-m13 and k8s-m14 (a loop sketch follows this block)
  7. #2.1.创建 token
  8. cd /root/ssl
  9. export BOOTSTRAP_TOKEN=$(kubeadm token create \
  10. --description kubelet-bootstrap-token \
  11. --groups system:bootstrappers:k8s-m12 \
  12. --kubeconfig ~/.kube/config)
  13. #2.2.设置集群参数
  14. kubectl config set-cluster kubernetes \
  15. --certificate-authority=ca.pem \
  16. --embed-certs=true \
  17. --server=https://192.168.10.100:8443 \
  18. --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig
  19. #2.3.设置客户端认证参数
  20. kubectl config set-credentials kubelet-bootstrap \
  21. --token=${BOOTSTRAP_TOKEN} \
  22. --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig
  23. #2.4.设置上下文参数
  24. kubectl config set-context default \
  25. --cluster=kubernetes \
  26. --user=kubelet-bootstrap \
  27. --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig
  28. #2.5.设置默认上下文
  29. kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig
  30. #3.查看kubeadm为各节点创建的token
  31. kubeadm token list --kubeconfig ~/.kube/config
  32. #显示如下
  33. 11rq5j.3f628cf6ura1hf2x 20h 2019-06-29T13:01:52+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-m14
  34. 8zamvk.rfat8wyzh8311f89 20h 2019-06-29T12:59:26+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-m12
  35. lhxalz.busnf6izk82e0xqx 20h 2019-06-29T13:01:03+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-m13
  36. #3.1.To delete a created token if necessary
  37. kubeadm token --kubeconfig ~/.kube/config delete lhxalz.busnf6izk82e0xqx
  38. # 创建的token有效期为 1 天,超期后将不能再被使用,且会被kube-controller-manager的tokencleaner清理(如果启用该 controller 的话)。
  39. # kube-apiserver接收kubelet的bootstrap token后,将请求的user设置为system:bootstrap;group设置为 system:bootstrappers;
  40. #3.2.查看各token关联的secret
  41. kubectl get secrets -n kube-system
  42. #4.Copy the bootstrap kubeconfig files to the node machines
  43. scp kubelet-bootstrap-k8s-m12.kubeconfig 192.168.10.15:/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig
  44. scp kubelet-bootstrap-k8s-m12.kubeconfig 192.168.10.16:/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig
  45. #5.创建kubelet配置文件
  46. cd /root/ssl
  47. cat > kubelet.config.json <<EOF
  48. {
  49. "kind": "KubeletConfiguration",
  50. "apiVersion": "kubelet.config.k8s.io/v1beta1",
  51. "authentication": {
  52. "x509": {
  53. "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
  54. },
  55. "webhook": {
  56. "enabled": true,
  57. "cacheTTL": "2m0s"
  58. },
  59. "anonymous": {
  60. "enabled": false
  61. }
  62. },
  63. "authorization": {
  64. "mode": "Webhook",
  65. "webhook": {
  66. "cacheAuthorizedTTL": "5m0s",
  67. "cacheUnauthorizedTTL": "30s"
  68. }
  69. },
  70. "address": "0.0.0.0",
  71. "port": 10250,
  72. "readOnlyPort": 0,
  73. "cgroupDriver": "systemd",
  74. "hairpinMode": "promiscuous-bridge",
  75. "serializeImagePulls": false,
  76. "featureGates": {
  77. "RotateKubeletClientCertificate": true,
  78. "RotateKubeletServerCertificate": true
  79. },
  80. "clusterDomain": "cluster.local",
  81. "clusterDNS": ["10.254.0.2"]
  82. }
  83. EOF
  84. #6.拷贝到其他主机,注意,可以修改address为本机IP地址
  85. cp kubelet.config.json /etc/kubernetes/ssl
  86. scp kubelet.config.json 192.168.10.15:/etc/kubernetes/ssl
  87. scp kubelet.config.json 192.168.10.16:/etc/kubernetes/ssl
  88. #7.创建kubelet.service文件
  89. mkdir -p /var/log/kubernetes && mkdir -p /var/lib/kubelet #先创建目录
  90. cat <<EOF > /etc/systemd/system/kubelet.service
  91. [Unit]
  92. Description=Kubernetes Kubelet
  93. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  94. After=docker.service
  95. Requires=docker.service
  96. [Service]
  97. WorkingDirectory=/var/lib/kubelet
  98. ExecStart=/usr/local/bin/kubelet \
  99. --bootstrap-kubeconfig=/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig \
  100. --cert-dir=/etc/kubernetes/ssl \
  101. --network-plugin=cni \
  102. --cni-conf-dir=/etc/cni/net.d \
  103. --cni-bin-dir=/usr/local/bin/ \
  104. --fail-swap-on=false \
  105. --kubeconfig=/etc/kubernetes/ssl/kubelet.kubeconfig \
  106. --config=/etc/kubernetes/ssl/kubelet.config.json \
  107. --hostname-override=192.168.10.15 \
  108. --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  109. --allow-privileged=true \
  110. --alsologtostderr=true \
  111. --logtostderr=false \
  112. --cgroup-driver=systemd \
  113. --log-dir=/var/log/kubernetes \
  114. --v=2
  115. Restart=on-failure
  116. RestartSec=5
  117. [Install]
  118. WantedBy=multi-user.target
  119. EOF
  120. #拷贝到其他主机,注意修改hostname-override为本机IP地址
  121. #8.Bootstrap Token Auth 和授予权限 ,需要先将bootstrap-token文件中的kubelet-bootstrap用户赋予system:node-bootstrapper角色,然后kubelet才有权限创建认证请求
  122. kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
  123. #9.启动kubele服务
  124. systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet && systemctl status kubelet
  125. #10.检查服务
  126. netstat -lantp|grep kubelet
  127. tcp 0 0 192.168.10.15:46936 192.168.10.100:8443 ESTABLISHED 15299/kubelet
  128. #8.通过kubelet 的TLS 证书请求,kubelet 首次启动时向kube-apiserver 发送证书签名请求,必须通过后kubernetes 系统才会将该 Node 加入到集群。查看未授权的CSR 请求
  129. kubectl get csr
  130. NAME AGE REQUESTOR CONDITION
  131. node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU 16m system:bootstrap:rhwf4g Pending
  132. node-csr-hiZbOHizDYsE_n36kfuSxWTmUzobCEnCpIXfN54Lh6Y 18m system:bootstrap:rhwf4g Pending
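As noted in step 2, the token and bootstrap kubeconfig have to be created once per group; a hedged loop version of steps 2.1-2.5 (run on the .12 machine, in /root/ssl):

cd /root/ssl
for node in k8s-m12 k8s-m13 k8s-m14; do
  TOKEN=$(kubeadm token create --description kubelet-bootstrap-token \
          --groups system:bootstrappers:$node --kubeconfig ~/.kube/config)
  kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true \
          --server=https://192.168.10.100:8443 --kubeconfig=kubelet-bootstrap-$node.kubeconfig
  kubectl config set-credentials kubelet-bootstrap --token=$TOKEN \
          --kubeconfig=kubelet-bootstrap-$node.kubeconfig
  kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap \
          --kubeconfig=kubelet-bootstrap-$node.kubeconfig
  kubectl config use-context default --kubeconfig=kubelet-bootstrap-$node.kubeconfig
done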

Approving the kubelet CSR requests

  1. #1.手动approve csr请求(推荐自动的方式)
  2. kubectl certificate approve node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU #手动创建
  3. #显示如下
  4. certificatesigningrequest.certificates.k8s.io/node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU approved
  5. #1.1.查看Approve结果
  6. kubectl describe csr node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU
  7. #显示如下
  8. Name: node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU
  9. Labels: <none>
  10. Annotations: <none>
  11. CreationTimestamp: Wed, 26 Jun 2019 15:12:40 +0800
  12. Requesting User: system:bootstrap:rhwf4g
  13. Status: Approved,Issued
  14. Subject:
  15. Common Name: system:node:192.168.10.16
  16. Serial Number:
  17. Organization: system:nodes
  18. Events: <none>
  19. #1.2.特别多可以用这样的方式
  20. kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
  21. kubectl get csr|awk 'NR==3{print $1}'| xargs kubectl describe csr #查看Approve结果
  22. #2.自动approve csr请求(推荐),创建ClusterRoleBinding,分别用于自动 approve client、renew client、renew server 证书
  23. cd /root/ssl
  24. cat > csr-crb.yaml <<EOF
  25. # Approve all CSRs for the group "system:bootstrappers"
  26. kind: ClusterRoleBinding
  27. apiVersion: rbac.authorization.k8s.io/v1
  28. metadata:
  29.   name: auto-approve-csrs-for-group
  30. subjects:
  31. - kind: Group
  32.   name: system:bootstrappers
  33.   apiGroup: rbac.authorization.k8s.io
  34. roleRef:
  35.   kind: ClusterRole
  36.   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  37.   apiGroup: rbac.authorization.k8s.io
  38. ---
  39. # To let a node of the group "system:bootstrappers" renew its own credentials
  40. kind: ClusterRoleBinding
  41. apiVersion: rbac.authorization.k8s.io/v1
  42. metadata:
  43.   name: node-client-cert-renewal
  44. subjects:
  45. - kind: Group
  46.   name: system:bootstrappers
  47.   apiGroup: rbac.authorization.k8s.io
  48. roleRef:
  49.   kind: ClusterRole
  50.   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  51.   apiGroup: rbac.authorization.k8s.io
  52. ---
  53. # A ClusterRole which instructs the CSR approver to approve a node requesting a
  54. # serving cert matching its client cert.
  55. kind: ClusterRole
  56. apiVersion: rbac.authorization.k8s.io/v1
  57. metadata:
  58.   name: approve-node-server-renewal-csr
  59. rules:
  60. - apiGroups: ["certificates.k8s.io"]
  61.   resources: ["certificatesigningrequests/selfnodeserver"]
  62.   verbs: ["create"]
  63. ---
  64. # To let a node of the group "system:nodes" renew its own server credentials
  65. kind: ClusterRoleBinding
  66. apiVersion: rbac.authorization.k8s.io/v1
  67. metadata:
  68.   name: node-server-cert-renewal
  69. subjects:
  70. - kind: Group
  71.   name: system:nodes
  72.   apiGroup: rbac.authorization.k8s.io
  73. roleRef:
  74.   kind: ClusterRole
  75.   name: approve-node-server-renewal-csr
  76.   apiGroup: rbac.authorization.k8s.io
  77. EOF
  78. #3.拷贝到其他节点
  79. cp csr-crb.yaml /etc/kubernetes/ssl
  80. scp csr-crb.yaml 192.168.10.13:/etc/kubernetes/ssl
  81. scp csr-crb.yaml 192.168.10.14:/etc/kubernetes/ssl
  82. #4.生效配置
  83. kubectl apply -f /etc/kubernetes/ssl/csr-crb.yaml
  84. #5.验证
  85. kubectl get csr #等待一段时间,查看CSR都被自动approve
  86. #显示如下
  87. NAME AGE REQUESTOR CONDITION
  88. node-csr-cF4D5xoTEQCkK5QCsCAmsHGItlZ2cJ43RjkGXpM4BNw 38m system:bootstrap:8zamvk Approved,Issued
  89. node-csr-lUIuS1_ggYM8Q95rgsUrBawzrsAXQ4QfYcP3BbPnWl8 36m system:bootstrap:lhxalz Approved,Issued
  90. kubectl get --all-namespaces -o wide nodes #所有节点均 ready
  91. #显示如下
  92. NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
  93. 192.168.10.15 Ready <none> 5m33s v1.14.3 192.168.10.15 <none> CentOS Linux 7 (Core) 4.4.103-1.el7.elrepo.x86_64 docker://18.9.6
  94. 192.168.10.16 Ready <none> 54s v1.14.3 192.168.10.16 <none> CentOS Linux 7 (Core) 4.4.103-1.el7.elrepo.x86_64 docker://18.9.6
  95. kubectl get nodes
  96. NAME STATUS ROLES AGE VERSION
  97. 192.168.10.15 Ready <none> 6m55s v1.14.3
  98. 192.168.10.16 Ready <none> 2m16s v1.14.3
  99. netstat -lnpt|grep kubelet
  100. tcp 0 0 127.0.0.1:10248 0.0.0.0:* LISTEN 20302/kubelet
  101. tcp 0 0 192.168.10.15:10250 0.0.0.0:* LISTEN 20302/kubelet
  102. tcp 0 0 127.0.0.1:37706 0.0.0.0:* LISTEN 20302/kubelet
  103. tcp 0 0 192.168.10.15:60332 192.168.10.100:8443 ESTABLISHED 20302/kubelet
  104. #10248: healthz http 服务,10250; https API 服务;注意:未开启只读端口 10255;由于关闭了匿名认证,同时开启了 webhook 授权,所有访问 10250 端口 https API 的请求都需要被认证和授权。

kubelet API authentication and authorization

The kubelet configuration file kubelet.config.json sets the following authentication parameters:

  • authentication.anonymous.enabled: set to false, so anonymous access to port 10250 is not allowed;
  • authentication.x509.clientCAFile: specifies the CA that signs client certificates, enabling HTTPS certificate authentication;
  • authentication.webhook.enabled=true: enables HTTPS bearer-token authentication;

It also sets the following authorization parameter:

  • authorization.mode=Webhook: enables RBAC authorization;
  1. # kubelet 收到请求后,使用 clientCAFile 对证书签名进行认证,或者查询 bearer token 是否有效。如果两者都没通过,则拒绝请求,提示 Unauthorized
  2. curl -s --cacert /etc/kubernetes/ssl/ca.pem https://127.0.0.1:10250/metrics
  3. curl -s --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer 123456" https://192.168.10.15:10250/metrics
  4. #通过认证后,kubelet 使用 SubjectAccessReview API 向 kube-apiserver 发送请求,查询证书或 token 对应的 user、group 是否有操作资源的权限(RBAC);
  5. #1.证书认证和授权
  6. #权限不足的证书;
  7. curl -s --cacert /etc/kubernetes/ssl/ca.pem --cert /etc/kubernetes/ssl/kube-controller-manager.pem --key /etc/kubernetes/ssl/kube-controller-manager-key.pem https://192.168.10.15:10250/metrics
  8. #使用部署 kubectl 命令行工具时创建的、具有最高权限的 admin 证书;
  9. curl -s --cacert /etc/kubernetes/ssl/ca.pem --cert /etc/kubernetes/ssl/admin.pem --key /etc/kubernetes/ssl/admin-key.pem https://192.168.10.15:10250/metrics|head
  10. #2.bear token认证和授权:
  11. # 创建一个ServiceAccount,将它和ClusterRole system:kubelet-api-admin绑定,从而具有调用kubelet API的权限:
  12. kubectl create sa kubelet-api-test
  13. kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
  14. SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
  15. TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
  16. echo ${TOKEN}
  17. curl -s --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer ${TOKEN}" https://192.168.10.15:10250/metrics|head
  18. # cadvisor 和 metrics
  19. # cadvisor 统计所在节点各容器的资源(CPU、内存、磁盘、网卡)使用情况,分别在自己的 http web 页面(4194 端口)和 10250 以 promehteus metrics 的形式输出。
  20. # 浏览器访问 http://192.168.10.15:4194/containers/ 可以查看到 cadvisor 的监控页面:
  21. # 浏览器访问 https://192.168.10.15:10250/metrics 和 https://192.168.10.15:10250/metrics/cadvisor 分别返回 kublet 和 cadvisor 的 metrics。

Note: kubelet.config.json sets authentication.anonymous.enabled to false, so anonymous (certificate-less) access to the https service on port 10250 is not allowed. See "A. Accessing the kube-apiserver secure port from a browser.md" to create and import the relevant certificates, and then access port 10250 as above.

  1. #1.Install a JDK and use the keytool utility
  2. .\keytool -import -v -trustcacerts -alias appmanagement -file "E:\ca.pem" -storepass password -keystore cacerts
  3. #2.Then, on Linux, run
  4. openssl pkcs12 -export -out admin.pfx -inkey admin-key.pem -in admin.pem -certfile ca.pem
  5. #3.Import the resulting certificate into the browser and the pages above become accessible
2.8.2 Deploying kube-proxy

kube-proxy runs on all worker nodes. It watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance traffic to the services.

  1. #1.创建CA证书
  2. cd /root/ssl
  3. cat > kube-proxy-csr.json <<EOF
  4. {
  5. "CN": "system:kube-proxy",
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "names": [
  11. {
  12. "C": "CN",
  13. "ST": "ShangHai",
  14. "L": "ShangHai",
  15. "O": "k8s",
  16. "OU": "System"
  17. }
  18. ]
  19. }
  20. EOF
  21. #2.生成证书和私钥
  22. cfssl gencert -ca=ca.pem \
  23. -ca-key=ca-key.pem \
  24. -config=ca-config.json \
  25. -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
  26. #3.创建kubeconfig文件
  27. #3.1.设置集群参数
  28. kubectl config set-cluster kubernetes \
  29. --certificate-authority=ca.pem \
  30. --embed-certs=true \
  31. --server=https://192.168.10.100:8443 \
  32. --kubeconfig=kube-proxy.kubeconfig
  33. #3.2.设置客户端认证参数
  34. kubectl config set-credentials kube-proxy \
  35. --client-certificate=kube-proxy.pem \
  36. --client-key=kube-proxy-key.pem \
  37. --embed-certs=true \
  38. --kubeconfig=kube-proxy.kubeconfig
  39. #3.3.设置上下文参数
  40. kubectl config set-context default \
  41. --cluster=kubernetes \
  42. --user=kube-proxy \
  43. --kubeconfig=kube-proxy.kubeconfig
  44. #3.4.设置默认上下文
  45. kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
  46. #4.拷贝到其他节点
  47. cp kube-proxy*.pem kube-proxy.kubeconfig /etc/kubernetes/ssl/
  48. scp kube-proxy*.pem kube-proxy.kubeconfig 192.168.10.15:/etc/kubernetes/ssl/
  49. scp kube-proxy*.pem kube-proxy.kubeconfig 192.168.10.16:/etc/kubernetes/ssl/
  50. #5.Create the kube-proxy configuration file
  51. cd /root/ssl
  52. cat >kube-proxy.config.yaml <<EOF
  53. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  54. bindAddress: 192.168.10.15
  55. clientConnection:
  56.   kubeconfig: /etc/kubernetes/ssl/kube-proxy.kubeconfig
  57. clusterCIDR: 172.30.0.0/16
  58. healthzBindAddress: 192.168.10.15:10256
  59. hostnameOverride: 192.168.10.15
  60. kind: KubeProxyConfiguration
  61. metricsBindAddress: 192.168.10.15:10249
  62. mode: "ipvs"
  63. EOF
  64. #6.Copy to the other nodes (on each node, change bindAddress, healthzBindAddress, hostnameOverride and metricsBindAddress to that node's own IP)
  65. cp kube-proxy.config.yaml /etc/kubernetes/ssl/
  66. scp kube-proxy.config.yaml 192.168.10.15:/etc/kubernetes/ssl/
  67. scp kube-proxy.config.yaml 192.168.10.16:/etc/kubernetes/ssl/
  68. #7.Create the kube-proxy.service file, then copy it to the other nodes
  69. cat << EOF > /etc/systemd/system/kube-proxy.service
  70. [Unit]
  71. Description=Kubernetes Kube-Proxy Server
  72. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  73. After=network.target
  74. [Service]
  75. WorkingDirectory=/var/lib/kube-proxy
  76. ExecStart=/usr/local/bin/kube-proxy \
  77. --config=/etc/kubernetes/ssl/kube-proxy.config.yaml \
  78. --alsologtostderr=true \
  79. --logtostderr=false \
  80. --log-dir=/var/log/kubernetes/kube-proxy \
  81. --v=2
  82. Restart=on-failure
  83. RestartSec=5
  84. LimitNOFILE=65536
  85. [Install]
  86. WantedBy=multi-user.target
  87. EOF
  88. #8.Start the kube-proxy service
  89. mkdir -p /var/lib/kube-proxy && mkdir -p /var/log/kubernetes/kube-proxy
  90. systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy
  91. netstat -lnpt|grep kube-proxy #check the listening ports
  92. ipvsadm -ln #check the ipvs rules (a health/metrics check sketch follows after this block)
  93. #output looks like
  94. IP Virtual Server version 1.2.1 (size=4096)
  95. Prot LocalAddress:Port Scheduler Flags
  96. -> RemoteAddress:Port Forward Weight ActiveConn InActConn
  97. TCP 10.254.0.1:443 rr
  98. -> 192.168.10.12:6443 Masq 1 0 0
  99. -> 192.168.10.13:6443 Masq 1 0 0
  100. -> 192.168.10.14:6443 Masq 1 0 0
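As an additional check, the healthz and metrics endpoints configured above (healthzBindAddress on 10256, metricsBindAddress on 10249) can be queried directly; a minimal sketch, run on node 192.168.10.15:

  # kube-proxy liveness endpoint; prints 200 when kube-proxy is healthy
  curl -s -o /dev/null -w '%{http_code}\n' http://192.168.10.15:10256/healthz
  # kube-proxy Prometheus metrics (rule-sync latencies, proxier counters, ...)
  curl -s http://192.168.10.15:10249/metrics | head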
2.8.3、Verify cluster functionality
  1. kubectl get nodes #check node status
  2. # 1.Create an nginx test manifest
  3. cat << EOF > nginx-web.yml
  4. apiVersion: v1
  5. kind: Service
  6. metadata:
  7.   name: nginx-web
  8.   labels:
  9.     tier: frontend
  10. spec:
  11.   type: NodePort
  12.   selector:
  13.     tier: frontend
  14.   ports:
  15.   - name: http
  16.     port: 80
  17.     targetPort: 80
  18. ---
  19. apiVersion: extensions/v1beta1
  20. kind: Deployment
  21. metadata:
  22.   name: nginx-con
  23.   labels:
  24.     tier: frontend
  25. spec:
  26.   replicas: 3
  27.   template:
  28.     metadata:
  29.       labels:
  30.         tier: frontend
  31.     spec:
  32.       containers:
  33.       - name: nginx-pod
  34.         image: nginx
  35.         ports:
  36.         - containerPort: 80
  37. EOF
  38. #2.Apply the manifest
  39. kubectl create -f nginx-web.yml
  40. #output shows the objects were created
  41. service/nginx-web created
  42. deployment.extensions/nginx-con created
  43. #3.Check pod status
  44. kubectl get pod -o wide
  45. #output looks like
  46. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  47. nginx-con-7dc84bdfb6-h6bt6 1/1 Running 0 105s 172.30.85.2 192.168.10.16 <none> <none>
  48. nginx-con-7dc84bdfb6-nt5qs 1/1 Running 0 105s 172.30.34.3 192.168.10.15 <none> <none>
  49. nginx-con-7dc84bdfb6-sfg87 1/1 Running 0 105s 172.30.34.2 192.168.10.15 <none> <none>
  50. #4.Check that a pod IP can be pinged
  51. ping -c4 172.30.34.2
  52. PING 172.30.34.2 (172.30.34.2) 56(84) bytes of data.
  53. 64 bytes from 172.30.34.2: icmp_seq=1 ttl=63 time=0.543 ms
  54. 64 bytes from 172.30.34.2: icmp_seq=2 ttl=63 time=0.684 ms
  55. 64 bytes from 172.30.34.2: icmp_seq=3 ttl=63 time=0.886 ms
  56. 64 bytes from 172.30.34.2: icmp_seq=4 ttl=63 time=0.817 ms
  57. #5.Check the Service cluster IP
  58. kubectl get svc #output looks like
  59. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  60. kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 37h
  61. nginx-web NodePort 10.254.153.104 <none> 80:31808/TCP 4m19s
  62. # 10.254.153.104 is the nginx Service's cluster IP, which proxies the 3 pods above; 80 is the cluster IP port and 31808 is the NodePort
  63. #6.curl node_ip:nodeport (a load-balancing check sketch follows after this block)
  64. curl -I 192.168.10.15:31808 #an HTTP 200 status means the access succeeded
  65. HTTP/1.1 200 OK
  66. Server: nginx/1.17.0
  67. Date: Sat, 29 Jun 2019 05:03:15 GMT
  68. Content-Type: text/html
  69. Content-Length: 612
  70. Last-Modified: Tue, 21 May 2019 14:23:57 GMT
  71. Connection: keep-alive
  72. ETag: "5ce409fd-264"
  73. Accept-Ranges: bytes
  74. #7.Access the cluster IP from a host on the flannel network
  75. ip add | grep 10.254
  76. inet 10.254.0.1/32 brd 10.254.0.1 scope global kube-ipvs0
  77. inet 10.254.153.104/32 brd 10.254.153.104 scope global kube-ipvs0
  78. curl -I http://10.254.153.104:80 #returns the following
  79. HTTP/1.1 200 OK
  80. Server: nginx/1.17.0
  81. Date: Sat, 29 Jun 2019 05:05:56 GMT
  82. Content-Type: text/html
  83. Content-Length: 612
  84. Last-Modified: Tue, 21 May 2019 14:23:57 GMT
  85. Connection: keep-alive
  86. ETag: "5ce409fd-264"
  87. Accept-Ranges: bytes
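To confirm that the ipvs rr scheduler really spreads traffic across all three replicas, each pod's index page can be tagged with its hostname and the NodePort hit in a loop; a minimal sketch (pod names will differ in your cluster):

  # Write each replica's hostname into its nginx index page
  for p in $(kubectl get pod -l tier=frontend -o name); do
    kubectl exec ${p#pod/} -- sh -c 'echo ${HOSTNAME} > /usr/share/nginx/html/index.html'
  done
  # Repeated requests to the NodePort should cycle through the three pod hostnames
  for i in $(seq 1 6); do curl -s http://192.168.10.15:31808; done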

2.9、Deploy cluster add-ons

Add-ons are supplementary components that enrich and extend the cluster's functionality.

2.9.1、Deploy the coredns add-on
  1. #1.After extracting kubernetes-server-linux-amd64.tar.gz, extract the kubernetes-src.tar.gz found inside it
  2. mkdir -p src && tar -zxvf kubernetes-src.tar.gz -C src #the coredns files live under cluster/addons/dns
  3. #2.Modify the configuration file
  4. cd src/cluster/addons/dns/coredns
  5. cp coredns.yaml.base /etc/kubernetes/coredns.yaml
  6. sed -i "s/__PILLAR__DNS__DOMAIN__/cluster.local/g" /etc/kubernetes/coredns.yaml
  7. sed -i "s/__PILLAR__DNS__SERVER__/10.254.0.2/g" /etc/kubernetes/coredns.yaml
  8. #3.Create coredns
  9. kubectl create -f /etc/kubernetes/coredns.yaml
  10. #4.Check that coredns works
  11. kubectl -n kube-system get all -o wide
  12. #output looks like
  13. NAME READY STATUS RESTARTS AGE
  14. pod/coredns-8854569d4-5vshp 1/1 Running 0 58m
  15. #
  16. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  17. service/kube-dns ClusterIP 10.254.0.2 <none> 53/UDP,53/TCP,9153/TCP 81m
  18. #
  19. NAME READY UP-TO-DATE AVAILABLE AGE
  20. deployment.apps/coredns 1/1 1 1 58m
  21. #
  22. NAME DESIRED CURRENT READY AGE
  23. replicaset.apps/coredns-8854569d4 1 1 1 58m
  24. #4.1
  25. kubectl -n kube-system describe pod coredns
  26. #4.2
  27. kubectl -n kube-system logs coredns-8854569d4-5vshp
  28. #5.Verify from inside a container (a DNS lookup sketch follows after this block)
  29. kubectl run dns-test --rm -it --image=alpine /bin/sh
  30. #inside the container, pinging www.baidu.com works
  31. ping www.baidu.com
  32. PING www.baidu.com (182.61.200.6): 56 data bytes
  33. 64 bytes from 182.61.200.6: seq=0 ttl=127 time=41.546 ms
  34. 64 bytes from 182.61.200.6: seq=1 ttl=127 time=35.043 ms
  35. 64 bytes from 182.61.200.6: seq=2 ttl=127 time=38.977 ms
  36. 64 bytes from 182.61.200.6: seq=3 ttl=127 time=40.633 ms
  37. #list the pods in all namespaces
  38. kubectl get --all-namespaces pods
  39. #6.If the image cannot be pulled, modify the manifest to use another registry
  40. sed -i "s/k8s.gcr.io/coredns/g" /etc/kubernetes/coredns.yaml
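Besides the external ping test above, it is worth confirming that in-cluster names resolve through the new kube-dns Service (10.254.0.2); a minimal sketch using a throwaway busybox pod (the busybox:1.28 tag is an assumption, chosen because its nslookup is known to behave well):

  kubectl run dns-check --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default
  # The answer should come from 10.254.0.2 (kube-dns) and resolve to the
  # kubernetes Service cluster IP 10.254.0.1 seen earlier with `kubectl get svc`.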
2.9.2、Deploy the dashboard add-on

References:
https://github.com/kubernetes/dashboard/wiki/Access-control
https://github.com/kubernetes/dashboard/issues/2558
https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/

  1. #1.After extracting kubernetes-server-linux-amd64.tar.gz, extract the kubernetes-src.tar.gz inside it. The dashboard files live under cluster/addons/dashboard; copy them out
  2. mkdir -p /etc/kubernetes/dashboard
  3. cp -a /opt/kubernetes/src/cluster/addons/dashboard/{dashboard-configmap.yaml,dashboard-controller.yaml,dashboard-rbac.yaml,dashboard-secret.yaml,dashboard-service.yaml} /etc/kubernetes/dashboard
  4. #2.Modify the configuration files
  5. sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1@g" /etc/kubernetes/dashboard/dashboard-controller.yaml
  6. sed -i "/spec/a\ type: NodePort" /etc/kubernetes/dashboard/dashboard-service.yaml
  7. sed -i "/targetPort/a\ nodePort: 32700" /etc/kubernetes/dashboard/dashboard-service.yaml
  8. #3.Apply all definition files
  9. kubectl create -f /etc/kubernetes/dashboard
  10. #4.Check the assigned NodePort
  11. kubectl -n kube-system get all -o wide
  12. #
  13. NAME READY STATUS RESTARTS AGE
  14. pod/coredns-8854569d4-5vshp 1/1 Running 0 119m
  15. pod/kubernetes-dashboard-7d5f7c58f5-mr8zn 1/1 Running 0 5m1s
  16. #
  17. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  18. service/kube-dns ClusterIP 10.254.0.2 <none> 53/UDP,53/TCP,9153/TCP 142m
  19. service/kubernetes-dashboard NodePort 10.254.63.16 <none> 443:32700/TCP 51s
  20. #
  21. NAME READY UP-TO-DATE AVAILABLE AGE
  22. deployment.apps/coredns 1/1 1 1 119m
  23. deployment.apps/kubernetes-dashboard 1/1 1 1 5m4s
  24. #
  25. NAME DESIRED CURRENT READY AGE
  26. replicaset.apps/coredns-8854569d4 1 1 1 119m
  27. replicaset.apps/kubernetes-dashboard-7d5f7c58f5 1 1 1 5m4s
  28. kubectl -n kube-system describe pod kubernetes-dashboard
  29. #The NodePort maps to port 443 of the dashboard pod;
  30. #The dashboard's --authentication-mode supports token and basic, defaulting to token. To use basic, kube-apiserver must be configured with '--authorization-mode=ABAC' and '--basic-auth-file'.
  31. #5.List the command-line flags supported by the dashboard
  32. kubectl exec --namespace kube-system -it kubernetes-dashboard-7d5f7c58f5-mr8zn -- /dashboard --help
  33. #6.Access the dashboard
  34. # For security, since 1.7 the dashboard only allows access over https. When kube proxy is used it must listen on localhost or 127.0.0.1; NodePort has no such restriction, but it is only recommended for development environments. For access that does not meet these conditions, the browser does not redirect after a successful login and stays on the login page.
  35. Reference 1: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
  36. Reference 2: https://github.com/kubernetes/dashboard/issues/2540
  37. # Three ways to access the dashboard
  38. # via NodePort;
  39. # via kubectl proxy;
  40. # via kube-apiserver;
  41. #7.Access the dashboard via NodePort
  42. # The kubernetes-dashboard service exposes a NodePort, so the dashboard can be reached at https://NodeIP:NodePort;
  43. #8.Access the dashboard via kubectl proxy
  44. #Start the proxy:
  45. kubectl proxy --address='localhost' --port=8086 --accept-hosts='^*$'
  46. # --address must be localhost or 127.0.0.1;
  47. # --accept-hosts must be specified, otherwise the browser gets an "Unauthorized" error when opening the dashboard page;
  48. # Browse to: http://127.0.0.1:8086/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
  49. #9.Access the dashboard via kube-apiserver
  50. # Get the list of cluster service URLs:
  51. kubectl cluster-info
  52. # The dashboard must be accessed through the kube-apiserver secure (https) port; the browser needs a custom certificate, otherwise kube-apiserver rejects the request.
  53. # For the steps to create and import the custom certificate, see: A.浏览器访问kube-apiserver安全端口
  54. # Browse to: https://192.168.10.100:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
  55. #10.Create a login token and a kubeconfig file for the Dashboard
  56. # As mentioned above, the Dashboard only supports token authentication by default, so when a KubeConfig file is used it must contain a token; client certificate authentication is not supported.
  57. # Create the login token used when accessing the dashboard
  58. kubectl create sa dashboard-admin -n kube-system
  59. kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
  60. ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
  61. DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
  62. echo ${DASHBOARD_LOGIN_TOKEN}
  63. #Log in to the Dashboard with the printed token.
  64. #Create a KubeConfig file that uses the token
  65. cd /root/ssl
  66. #Set cluster parameters
  67. kubectl config set-cluster kubernetes \
  68. --certificate-authority=ca.pem \
  69. --embed-certs=true \
  70. --server=https://192.168.10.100:8443 \
  71. --kubeconfig=dashboard.kubeconfig
  72. #Set client authentication parameters, using the token created above
  73. kubectl config set-credentials dashboard_user \
  74. --token=${DASHBOARD_LOGIN_TOKEN} \
  75. --kubeconfig=dashboard.kubeconfig
  76. #Set context parameters
  77. kubectl config set-context default \
  78. --cluster=kubernetes \
  79. --user=dashboard_user \
  80. --kubeconfig=dashboard.kubeconfig
  81. #Set the default context
  82. kubectl config use-context default --kubeconfig=dashboard.kubeconfig
  83. #Log in to the Dashboard with the generated dashboard.kubeconfig (a quick command-line check follows after this block).
  84. #Because the Heapster add-on is not installed yet, the dashboard cannot show CPU/memory statistics or charts for Pods and Nodes;
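Before switching to the browser, the NodePort endpoint and the token can be sanity-checked from the shell; a minimal sketch (-k skips verification of the dashboard's self-signed certificate; any node IP works):

  # The dashboard NodePort serves HTTPS and should answer with 200
  curl -sk -o /dev/null -w '%{http_code}\n' https://192.168.10.15:32700/
  # The login token should be a non-empty, three-part (JWT-style) string
  echo ${DASHBOARD_LOGIN_TOKEN} | awk -F. '{print NF" parts"}'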
2.9.3、Deploy the heapster add-on

Heapster is a collector: it aggregates the cAdvisor data from every node and forwards it to a third-party backend (such as InfluxDB). Heapster obtains the cAdvisor metrics by calling the kubelet HTTP API. Because the kubelet only accepts HTTPS requests on port 10250, the heapster deployment configuration has to be modified accordingly, and the kube-system:heapster ServiceAccount has to be granted permission to call the kubelet API.

Reference: configuring heapster: https://github.com/kubernetes/heapster/blob/master/docs/source-configuration.md

heapster download: https://github.com/kubernetes-retired/heapster/releases

  1. #1.Extract heapster
  2. mkdir /opt/heapster
  3. tar -xzvf heapster-1.5.4.tar.gz -C /opt/heapster
  4. #2.Modify the configuration
  5. mkdir -p /etc/kubernetes/heapster
  6. cp -a /opt/heapster/deploy/kube-config/influxdb/{grafana.yaml,heapster.yaml,influxdb.yaml} /etc/kubernetes/heapster
  7. sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v4.4.3@g" /etc/kubernetes/heapster/grafana.yaml
  8. sed -i "67a\ type: NodePort" /etc/kubernetes/heapster/grafana.yaml
  9. sed -i "/targetPort/a\ nodePort: 32699" /etc/kubernetes/heapster/grafana.yaml
  10. sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.3@g" /etc/kubernetes/heapster/heapster.yaml
  11. # Since the kubelet only listens for https requests on 10250, add the related parameters;
  12. sed -i "s@source=.*@source=kubernetes:https://kubernetes.default?kubeletHttps=true\&kubeletPort=10250@g" /etc/kubernetes/heapster/heapster.yaml
  13. sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-influxdb-amd64:v1.3.3@g" /etc/kubernetes/heapster/influxdb.yaml
  14. # Bind the serviceAccount kube-system:heapster to the ClusterRole system:kubelet-api-admin to grant it permission to call the kubelet API;
  15. cp -a /opt/heapster/deploy/kube-config/rbac/heapster-rbac.yaml /etc/kubernetes/heapster
  16. cat > /etc/kubernetes/heapster/heapster-rbac.yaml <<EOF
  17. kind: ClusterRoleBinding
  18. apiVersion: rbac.authorization.k8s.io/v1beta1
  19. metadata:
  20.   name: heapster-kubelet-api
  21. roleRef:
  22.   apiGroup: rbac.authorization.k8s.io
  23.   kind: ClusterRole
  24.   name: system:kubelet-api-admin
  25. subjects:
  26. - kind: ServiceAccount
  27.   name: heapster
  28.   namespace: kube-system
  29. EOF
  30. #3.Apply all definition files
  31. kubectl create -f /etc/kubernetes/heapster
  32. kubectl apply -f /etc/kubernetes/heapster/heapster-rbac.yaml
  33. #4.Check the results
  34. kubectl -n kube-system get all -o wide | grep -E 'heapster|monitoring'
  35. kubectl -n kube-system describe pod heapster
  36. kubectl -n kube-system describe pod monitoring
  37. # Check the kubernetes dashboard UI; it should now correctly show CPU, memory, load and other statistics and charts for Nodes and Pods:
  38. kubectl -n kube-system get all -o wide
  39. kubectl -n kube-system logs heapster-7bdc95b5cc-8h7zt
  40. #5.Access grafana through the NodePort:
  41. kubectl get svc -n kube-system|grep -E 'monitoring|heapster'
  42. #output looks like; grafana listens on NodePort 32699;
  43. heapster ClusterIP 10.254.159.62 <none> 80/TCP 12m k8s-app=heapster
  44. monitoring-grafana NodePort 10.254.167.38 <none> 80:32699/TCP 4m29s k8s-app=grafana
  45. monitoring-influxdb ClusterIP 10.254.155.141 <none> 8086/TCP 12m k8s-app=influxdb
  46. kubectl get pod -n kube-system -o wide |grep -E 'monitoring|heapster'
  47. #output looks like; then browse to http://192.168.10.16:32699/?orgId=1
  48. heapster-7bdc95b5cc-8h7zt 1/1 Running 0 13m 172.30.34.4 192.168.10.15
  49. monitoring-grafana-6cf5948cd4-rstxk 1/1 Running 0 5m 172.30.85.11 192.168.10.16
  50. monitoring-influxdb-7d6c5fb944-qfd65 1/1 Running 0 13m 172.30.85.10 192.168.10.16
  51. #6.Access through kube-apiserver: get the monitoring-grafana service URL:
  52. kubectl cluster-info
  53. #the browser URL found there is https://192.168.10.100:8443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
  54. #Access through kubectl proxy: start the proxy
  55. kubectl proxy --address='192.168.10.16' --port=8086 --accept-hosts='^*$'
  56. # Browse to: http://192.168.10.16:8086/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/?orgId=1 (a heapster API probe sketch follows after this block)
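Heapster itself can also be probed to confirm it is scraping the kubelets. A minimal sketch under two assumptions: the heapster ClusterIP is the 10.254.159.62 shown above, and heapster's model API paths (/api/v1/model/...) are available, as documented for heapster 1.5.x; run it on any host attached to the flannel network:

  # List the cluster-level metrics heapster knows about
  curl -s http://10.254.159.62/api/v1/model/metrics
  # List the nodes heapster is collecting from; both workers (192.168.10.15/16) should appear
  curl -s http://10.254.159.62/api/v1/model/nodes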

Reposted from: https://www.cnblogs.com/fan-gx/p/11108276.html
