1. Software Versions

First, upgrade the CentOS 7 kernel, preferably to 4.4 or later (the default 3.10 kernel has known bugs when running Docker at scale).
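A minimal upgrade sketch using the ELRepo repository (the repository URL and the kernel-lt package choice are assumptions; any kernel at or above 4.4 works):

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt    # long-term-support kernel branch
grub2-set-default 0                                    # newest installed kernel is menu entry 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot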

Software/System   Version   Notes
CentOS            7.9       minimal install
k8s               1.15.1
flannel           0.11
etcd              3.3.10

2. Role Assignment

k8s role              Hostname            Node IP     Notes
master1+etcd1         master1.host.com    10.0.0.70   master node
master2+etcd2         master2.host.com    10.0.0.71   master node
master3+etcd3         master3.host.com    10.0.0.72   master node
node1                 node1.host.com      10.0.0.73   node
node2                 node2.host.com      10.0.0.74   node
haproxy1+keepalived   haproxy1.host.com   10.0.0.75   load balancer (VIP: 10.0.0.80)
haproxy2+keepalived   haproxy2.host.com   10.0.0.76   load balancer

3. Installation Procedure — Milestone 1: Install base tools

Run on all nodes

yum install net-tools vim wget -y

Milestone 2: Disable the firewall and SELinux

Run on all nodes

systemctl stop firewalld
systemctl disable firewalld
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
reboot

Milestone 3: Set the time zone

Run on all nodes

\cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime -rf

Milestone 4: Disable swap

Run on all nodes

swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Milestone 5: Configure system time synchronization

Run on all nodes

yum install -y ntpdate
ntpdate -u ntp.aliyun.com
echo "*/5 * * * * ntpdate ntp.aliyun.com >/dev/null 2>&1" >> /etc/crontab
systemctl start crond.service
systemctl enable crond.service

Milestone 6: Configure host name resolution

cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.70   master1.host.com
10.0.0.71   master2.host.com
10.0.0.72   master3.host.com
10.0.0.73   node1.host.com
10.0.0.74   node2.host.com
10.0.0.75   haproxy1.host.com
10.0.0.76   haproxy2.host.com
EOF

Milestone 7: Passwordless SSH login

Run the script below on master1. The heredoc in the original was garbled, so this is a reconstructed sketch: the key-generation step, the password, and the host list are assumptions to adapt to your environment.

yum install sshpass -y
ssh-keygen -f /root/.ssh/id_rsa -P '' -q    # assumption: generate a key pair if none exists
cat > scp.sh <<"EOF"
#!/bin/bash
# Push the local public key to every node; adjust the password and host list as needed
for node in master1.host.com master2.host.com master3.host.com node1.host.com node2.host.com haproxy1.host.com haproxy2.host.com
do
  sshpass -p "123456" ssh-copy-id -o StrictHostKeyChecking=no root@${node} &>/dev/null
  if [ $? -eq 0 ];then
     echo "${node} key copy succeeded"
  else
     echo "${node} key copy failed"
  fi
done
EOF
sh scp.sh

Milestone 8: Tune kernel parameters

Master and node machines

cat >/etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
fs.file-max=52706963
fs.nr_open=52706963
EOF
modprobe br_netfilter    # assumption: load br_netfilter so the bridge sysctls exist
sysctl -p /etc/sysctl.d/kubernetes.conf

Milestone 9: Configure high availability — install keepalived

Install on both haproxy nodes:
yum install -y keepalived

Write the configuration files

Configure haproxy1 as MASTER

cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
  router_id KUB_LVS
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 66
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.80/24 dev eth0 label eth0:1
    }
}
EOF

Configure haproxy2 as BACKUP

cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
  router_id KUB_LVS
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 66
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.80/24 dev eth0 label eth0:1
    }
}
EOF

Enable keepalived on boot

systemctl start keepalived
systemctl enable keepalived
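To confirm the VIP landed on the MASTER, check the interface named in the config above (eth0) on haproxy1:

ip addr show eth0 | grep 10.0.0.80    # should appear while haproxy1 is MASTER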

Install haproxy (the same steps on both nodes; only the bound addresses differ)

1. Upload the packages
root@haproxy1:/usr/local/src# ll
total 3792
drwxr-xr-x  4 root root      96 Jun  9 14:01 ./
drwxr-xr-x 10 root root     140 Jun  9 14:02 ../
drwxrwxr-x 13 root root    4096 Jun  9 14:16 haproxy-2.4.4/
-rw-r--r--  1 root root 3570069 May 24 10:25 haproxy-2.4.4.tar.gz
drwxr-xr-x  4 1026 ygw       58 Jun 27  2018 lua-5.3.5/
-rw-r--r--  1 root root  303543 Nov 16  2020 lua-5.3.5.tar.gz

2. Create symlinks
ln -s /usr/local/src/lua-5.3.5 /usr/local/lua
ln -s /usr/local/src/haproxy-2.4.4 /usr/local/haproxy

3. Build lua
1) Install dependencies
yum install gcc gcc-c++ readline-devel glibc glibc-devel pcre pcre-devel openssl-devel zlib-devel systemd-devel -y
cd /usr/local/lua && make linux
Check the built version:
/usr/local/lua/src/lua -v
Lua 5.3.5  Copyright (C) 1994-2018 Lua.org, PUC-Rio

4. Build haproxy
cd /usr/local/src/haproxy-2.4.4 && make TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_LUA=1 LUA_INC=/usr/local/lua/src/ LUA_LIB=/usr/local/lua/src/ && make install PREFIX=/apps/haproxy

5. Symlink for convenient invocation
ln -s /apps/haproxy/sbin/haproxy /usr/sbin/

6. Check the version
haproxy -v

Write the haproxy configuration file

mkdir /etc/haproxy
cat >/etc/haproxy/haproxy.cfg<<EOF
global
    log          127.0.0.1 local2 info
    chroot       /apps/haproxy
    pidfile      /var/lib/haproxy/haproxy.pid
    stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
    maxconn      100000
    user         haproxy
    group        haproxy
    daemon

defaults
    mode   http
    option http-keep-alive
    option forwardfor
    timeout connect 300000ms
    timeout client  300000ms
    timeout server  300000ms
    maxconn  100000

listen stats
  mode http
  bind 0.0.0.0:9999
  stats enable
  log global
  stats uri   /haproxy-status
  stats auth  haadmin:123456

listen k8s-api-6443
  bind 10.0.0.80:6443
  mode tcp
  log global
  server master1 10.0.0.70:6443 check inter 3000 fall 3 rise 5
  server master2 10.0.0.71:6443 check inter 3000 fall 3 rise 5
  server master3 10.0.0.72:6443 check inter 3000 fall 3 rise 5
EOF

Write the haproxy systemd service file

cat >/etc/systemd/system/haproxy.service<<EOF
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 \$MAINPID

[Install]
WantedBy=multi-user.target
EOF

Create the user and directories

mkdir /var/lib/haproxy
useradd -r -s /sbin/nologin -d /var/lib/haproxy haproxy

Enable haproxy on boot

systemctl daemon-reload
systemctl start haproxy
systemctl enable haproxy
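A quick sanity check against the stats listener defined above (credentials come from the haproxy.cfg; run from any machine that can reach the haproxy node):

curl -u haadmin:123456 http://10.0.0.75:9999/haproxy-status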

Milestone 10: Configure certificates — download the self-signed certificate tool (cfssl)

Run on master1

mkdir /soft && cd /soft
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

Generate the ETCD certificates — create the working directory (master1)

mkdir /root/etcd && cd /root/etcd

Certificate configuration

Master1

cd /root/etcd
cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "www": {
         "expiry": "876000h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Create the CA certificate signing request file

cd /root/etcd
cat << EOF | tee ca-csr.json
{
    "CA":{"expiry":"876000h"},
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Create the ETCD certificate signing request file

Add all master hostnames and IPs to the csr file (run on master1)

cd /root/etcd
cat << EOF | tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
    "master1.host.com",
    "master2.host.com",
    "master3.host.com",
    "10.0.0.70",
    "10.0.0.71",
    "10.0.0.72"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Generate the ETCD CA certificate and key pair (run on Master-1)

cd /root/etcd/
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@master1 etcd]# ll
total 24
-rw-r--r-- 1 root root  289 Mar  5 07:40 ca-config.json  # CA config file
-rw-r--r-- 1 root root  956 Mar  5 07:51 ca.csr          # CA signing request
-rw-r--r-- 1 root root  209 Mar  5 07:45 ca-csr.json     # CA CSR definition
-rw------- 1 root root 1679 Mar  5 07:51 ca-key.pem      # CA private key
-rw-r--r-- 1 root root 1265 Mar  5 07:51 ca.pem          # CA certificate
-rw-r--r-- 1 root root  350 Mar  5 07:48 server-csr.json

Generate the etcd server certificate (Master-1)

cd /root/etcd/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
[root@master1 etcd]# ll
total 36
-rw-r--r-- 1 root root  289 Mar  5 07:40 ca-config.json
-rw-r--r-- 1 root root  956 Mar  5 07:51 ca.csr
-rw-r--r-- 1 root root  209 Mar  5 07:45 ca-csr.json
-rw------- 1 root root 1679 Mar  5 07:51 ca-key.pem
-rw-r--r-- 1 root root 1265 Mar  5 07:51 ca.pem
-rw-r--r-- 1 root root 1086 Mar  5 07:54 server.csr
-rw-r--r-- 1 root root  350 Mar  5 07:48 server-csr.json
-rw------- 1 root root 1679 Mar  5 07:54 server-key.pem   # used by etcd clients
-rw-r--r-- 1 root root 1415 Mar  5 07:54 server.pem

Create the Kubernetes certificates

These certificates secure communication between the Kubernetes components; they are separate from the ETCD certificates above. (Master-1)

Create the directory (Master-1)

mkdir /root/kubernetes/ && cd /root/kubernetes/

Configure the CA file (Master-1)

cd /root/kubernetes/
cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "876000h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
[root@master1 kubernetes]# ll
total 4
-rw-r--r-- 1 root root 296 Mar  5 07:58 ca-config.json

Create the CA certificate request file (Master-1)

cd /root/kubernetes/
cat << EOF | tee ca-csr.json
{
    "CA": { "expiry": "876000h" },
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@master1 kubernetes]# ll
total 8
-rw-r--r-- 1 root root 296 Mar  5 07:58 ca-config.json
-rw-r--r-- 1 root root 264 Mar  5 08:02 ca-csr.json

Create the API server certificate request file (Master-1)

In the hosts list, 192.168.0.1 is the first address of the service CIDR and 192.168.0.2 is the address the cluster DNS will use later (JSON does not allow inline comments, so these notes cannot live inside the file).

cd /root/kubernetes/
cat << EOF | tee server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "192.168.0.1",
      "127.0.0.1",
      "192.168.0.2",
      "10.0.0.70",
      "10.0.0.71",
      "10.0.0.72",
      "10.0.0.73",
      "10.0.0.74",
      "10.0.0.80",
      "master1.host.com",
      "master2.host.com",
      "master3.host.com",
      "node1.host.com",
      "node2.host.com",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@master1 kubernetes]# ll
total 12
-rw-r--r-- 1 root root 296 Mar  5 07:58 ca-config.json
-rw-r--r-- 1 root root 264 Mar  5 08:02 ca-csr.json
-rw-r--r-- 1 root root 681 Mar  5 08:21 server-csr.json

Create the Kubernetes Proxy certificate request file (Master-1)

cd /root/kubernetes/
cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the Kubernetes CA certificate and key pair

Generate the CA certificate (Master-1)

[root@master1 kubernetes]# pwd
/root/kubernetes
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@master1 kubernetes]# ll
total 28
-rw-r--r-- 1 root root  296 Mar  5 07:58 ca-config.json
-rw-r--r-- 1 root root 1001 Mar  5 08:23 ca.csr
-rw-r--r-- 1 root root  264 Mar  5 08:02 ca-csr.json
-rw------- 1 root root 1679 Mar  5 08:23 ca-key.pem
-rw-r--r-- 1 root root 1359 Mar  5 08:23 ca.pem
-rw-r--r-- 1 root root  230 Mar  5 08:23 kube-proxy-csr.json
-rw-r--r-- 1 root root  681 Mar  5 08:21 server-csr.json

Generate the api-server certificate (Master-1)

[root@master1 kubernetes]# pwd
/root/kubernetes
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
[root@master1 kubernetes]# ll
total 40
-rw-r--r-- 1 root root  296 Mar  5 07:58 ca-config.json
-rw-r--r-- 1 root root 1001 Mar  5 08:23 ca.csr
-rw-r--r-- 1 root root  264 Mar  5 08:02 ca-csr.json
-rw------- 1 root root 1679 Mar  5 08:23 ca-key.pem
-rw-r--r-- 1 root root 1359 Mar  5 08:23 ca.pem
-rw-r--r-- 1 root root  230 Mar  5 08:23 kube-proxy-csr.json
-rw-r--r-- 1 root root 1419 Mar  5 08:25 server.csr
-rw-r--r-- 1 root root  681 Mar  5 08:21 server-csr.json
-rw------- 1 root root 1679 Mar  5 08:25 server-key.pem
-rw-r--r-- 1 root root 1785 Mar  5 08:25 server.pem

Generate the kube-proxy certificate (Master-1)

[root@master1 kubernetes]# pwd
/root/kubernetes
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@master1 kubernetes]# ll
total 52
-rw-r--r-- 1 root root  296 Mar  5 07:58 ca-config.json
-rw-r--r-- 1 root root 1001 Mar  5 08:23 ca.csr
-rw-r--r-- 1 root root  264 Mar  5 08:02 ca-csr.json
-rw------- 1 root root 1679 Mar  5 08:23 ca-key.pem
-rw-r--r-- 1 root root 1359 Mar  5 08:23 ca.pem
-rw-r--r-- 1 root root 1009 Mar  5 08:27 kube-proxy.csr
-rw-r--r-- 1 root root  230 Mar  5 08:23 kube-proxy-csr.json
-rw------- 1 root root 1679 Mar  5 08:27 kube-proxy-key.pem
-rw-r--r-- 1 root root 1403 Mar  5 08:27 kube-proxy.pem
-rw-r--r-- 1 root root 1419 Mar  5 08:25 server.csr
-rw-r--r-- 1 root root  681 Mar  5 08:21 server-csr.json
-rw------- 1 root root 1679 Mar  5 08:25 server-key.pem
-rw-r--r-- 1 root root 1785 Mar  5 08:25 server.pem

Milestone 11: Deploy ETCD — download the etcd binaries (all masters)

mkdir -p /soft && cd /soft
wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
cd etcd-v3.3.10-linux-amd64/
cp etcd etcdctl /usr/local/bin/

Edit the etcd configuration file (all masters)

Note: change ETCD_NAME on each node
Note: change the listen addresses on each node

master1:
mkdir -p /etc/etcd/{cfg,ssl}
cat  >/etc/etcd/cfg/etcd.conf<<EOF
#[Member]
ETCD_NAME="master1.host.com"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.70:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.70:2379,http://10.0.0.70:2390"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.70:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.70:2379"
ETCD_INITIAL_CLUSTER="master1.host.com=https://10.0.0.70:2380,master2.host.com=https://10.0.0.71:2380,master3.host.com=https://10.0.0.72:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
master2:
mkdir -p /etc/etcd/{cfg,ssl}
cat  >/etc/etcd/cfg/etcd.conf<<EOF
#[Member]
ETCD_NAME="master2.host.com"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.71:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.71:2379,http://10.0.0.71:2390"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.71:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.71:2379"
ETCD_INITIAL_CLUSTER="master1.host.com=https://10.0.0.70:2380,master2.host.com=https://10.0.0.71:2380,master3.host.com=https://10.0.0.72:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
master3:
mkdir -p /etc/etcd/{cfg,ssl}
cat  >/etc/etcd/cfg/etcd.conf<<EOF
#[Member]
ETCD_NAME="master3.host.com"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.72:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.72:2379,http://10.0.0.72:2390"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.72:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.72:2379"
ETCD_INITIAL_CLUSTER="master1.host.com=https://10.0.0.70:2380,master2.host.com=https://10.0.0.71:2380,master3.host.com=https://10.0.0.72:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

Create the etcd systemd service (all masters)

cat > /usr/lib/systemd/system/etcd.service<<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/etcd/cfg/etcd.conf
ExecStart=/usr/local/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
--peer-cert-file=/etc/etcd/ssl/server.pem \
--peer-key-file=/etc/etcd/ssl/server-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Copy the etcd certificates into place (master1)

\cp /root/etcd/*pem /etc/etcd/ssl/
scp /etc/etcd/ssl/* 10.0.0.71:/etc/etcd/ssl/
scp /etc/etcd/ssl/* 10.0.0.72:/etc/etcd/ssl/

Before copying to the node machines, first create the directories on each node:
mkdir -p /etc/etcd/{cfg,ssl}
scp /etc/etcd/ssl/* 10.0.0.73:/etc/etcd/ssl/
scp /etc/etcd/ssl/* 10.0.0.74:/etc/etcd/ssl/

Start etcd (master nodes)

systemctl start etcd
systemctl enable etcd
systemctl status etcd
The first master will appear to hang on startup until the other members join the cluster.

Check that the etcd cluster is healthy

etcdctl --ca-file=/etc/etcd/ssl/ca.pem --cert-file=/etc/etcd/ssl/server.pem --key-file=/etc/etcd/ssl/server-key.pem --endpoints="https://10.0.0.70:2379" cluster-health
member 55829f95b702c087 is healthy: got healthy result from https://10.0.0.71:2379
member b1f1be65c0a2eb31 is healthy: got healthy result from https://10.0.0.72:2379
member b5d8162db028bc4e is healthy: got healthy result from https://10.0.0.70:2379
cluster is healthy

Milestone 12: Install Docker on all node machines

Refer to your Docker installation document.
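For completeness, a minimal sketch using the Aliyun docker-ce mirror (the mirror URL and the 18.09.9 version pin are assumptions; any Docker release supported by k8s 1.15 works):

yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.09.9 docker-ce-cli-18.09.9 containerd.io
systemctl start docker
systemctl enable docker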

Milestone 13: Write the POD network segment for Docker into etcd (any master node)

etcdctl --ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/server.pem --key-file=/etc/etcd/ssl/server-key.pem \
--endpoints="https://10.0.0.70:2379,https://10.0.0.71:2379,https://10.0.0.72:2379" \
set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

Check

etcdctl \
--endpoints=https://10.0.0.70:2379,https://10.0.0.71:2379,https://10.0.0.72:2379 \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
get /coreos.com/network/config

Milestone 14: Deploy flannel

Install on all nodes

cd /soft
tar xf flannel-v0.11.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /usr/local/bin/

Copy to the other nodes:
scp /usr/local/bin/flanneld 10.0.0.71:/usr/local/bin
scp /usr/local/bin/mk-docker-opts.sh 10.0.0.71:/usr/local/bin
scp /usr/local/bin/flanneld 10.0.0.72:/usr/local/bin
scp /usr/local/bin/mk-docker-opts.sh 10.0.0.72:/usr/local/bin
scp /usr/local/bin/flanneld 10.0.0.73:/usr/local/bin
scp /usr/local/bin/mk-docker-opts.sh 10.0.0.73:/usr/local/bin
scp /usr/local/bin/flanneld 10.0.0.74:/usr/local/bin
scp /usr/local/bin/mk-docker-opts.sh 10.0.0.74:/usr/local/bin

Configure flannel

All k8s nodes

mkdir -p /etc/flannel
cat > /etc/flannel/flannel.cfg<<EOF
FLANNEL_OPTIONS="-etcd-endpoints=https://10.0.0.70:2379,https://10.0.0.71:2379,https://10.0.0.72:2379 -etcd-cafile=/etc/etcd/ssl/ca.pem -etcd-certfile=/etc/etcd/ssl/server.pem -etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

Write the flannel systemd unit

All k8s nodes

cat > /usr/lib/systemd/system/flanneld.service <<EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/flannel/flannel.cfg
ExecStart=/usr/local/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start flannel

All nodes

systemctl daemon-reload
systemctl start flanneld.service
systemctl enable flanneld.service

Point the Docker daemon at the flannel network

The purpose of this step is to make docker and flannel operate on the same subnet, as sketched below.
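A sketch of the required unit-file change (assuming docker-ce's default unit path /usr/lib/systemd/system/docker.service; mk-docker-opts.sh in the flanneld unit above writes DOCKER_NETWORK_OPTIONS into /run/flannel/subnet.env). Edit the Service section so it reads:

[Service]
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS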

systemctl daemon-reload
systemctl restart docker


Check that docker is on the flannel subnet

From any master, check whether a node's docker0 address (for example 172.17.1.1) responds to ping.
On the node machines, verify that the docker0 bridges of the other nodes are reachable.
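A quick verification sketch (the 172.17.x.x addresses depend on the subnet flannel leased to each node):

cat /run/flannel/subnet.env    # subnet assigned to this node
ip addr show flannel.1
ip addr show docker0           # should fall inside the flannel subnet
ping -c 2 172.17.1.1           # docker0 of another node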

Milestone 15: Install the Master components

The following components are required on the masters:
kube-apiserver
kube-scheduler
kube-controller-manager

Install the API Server — download the Kubernetes binary package (1.15.1)

Unpack on master1, then copy to the other masters

cd /soft
tar xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/

Copy the binaries to the other master nodes

scp /usr/local/bin/kube* 10.0.0.71:/usr/local/bin
scp /usr/local/bin/kube* 10.0.0.72:/usr/local/bin

Configure the Kubernetes certificates

The Kubernetes components need certificates to talk to each other; copy them to every master node (from master1).

master1:
mkdir -p /etc/kubernetes/{cfg,ssl}
cp /root/kubernetes/*.pem /etc/kubernetes/ssl/
master2:
mkdir -p /etc/kubernetes/{cfg,ssl}
master3:
mkdir -p /etc/kubernetes/{cfg,ssl}

Copy to the other masters

scp /etc/kubernetes/ssl/* 10.0.0.71:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/* 10.0.0.72:/etc/kubernetes/ssl

Create the TLS Bootstrapping Token (this subsection explains its role; the actual file is written in the next step)

TLS bootstrapping lets the kubelet first connect to the apiserver as a predefined low-privilege user and then request a certificate; the kubelet's certificate is signed dynamically by the apiserver.
The token can be any string containing 128 bits of entropy and can be produced with a secure random number generator:

head -c 16 /dev/urandom | od -An -t x | tr -d ' '

Write the token file (all masters)

f89a76f197526a0d4bc2bf9c86e871c3: the random string generated above; kubelet-bootstrap: user name; 10001: UID; system:kubelet-bootstrap: user group.

cat > /etc/kubernetes/cfg/token.csv << EOF
f89a76f197526a0d4bc2bf9c86e871c3,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

Create the apiserver config file (all master nodes)

The file is essentially identical on every node; adjust only the IP addresses where needed.

cat >/etc/kubernetes/cfg/kube-apiserver.cfg <<EOF
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=https://10.0.0.70:2379,https://10.0.0.71:2379,https://10.0.0.72:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=192.168.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

Parameter notes:
--logtostderr                  log to stderr
--v=4                          log verbosity
--etcd-servers                 etcd cluster endpoints
--bind-address                 listen address
--secure-port                  https secure port
--advertise-address            cluster advertise address
--allow-privileged             allow privileged containers
--service-cluster-ip-range     Service virtual IP range; must be the same segment placed in the certificates signed earlier (192.168.0.0/24 in this lab)
--enable-admission-plugins     admission control modules
--authorization-mode           enable RBAC and Node authorization
--enable-bootstrap-token-auth  enable the TLS bootstrap feature
--token-auth-file              token file
--service-node-port-range      NodePort allocation range for Services

Configure the kube-apiserver systemd unit (all master nodes)

cat >/usr/lib/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start the kube-apiserver service (all masters)

systemctl daemon-reload
systemctl start kube-apiserver.service
systemctl enable kube-apiserver.service

Check

Check whether the secure port is listening

[root@master1 ssl]# netstat -anltup | grep 6443
tcp6       0      0 :::6443                 :::*                    LISTEN      49473/kube-apiserve
tcp6       0      0 ::1:43658               ::1:6443                ESTABLISHED 49473/kube-apiserve
tcp6       0      0 ::1:6443                ::1:43658               ESTABLISHED 49473/kube-apiserve

Check that the secure port is reachable from a node

[root@node1 ~]# telnet 10.0.0.70 6443
Trying 10.0.0.70...
Connected to 10.0.0.70.
Escape character is '^]'.
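It is also worth confirming the haproxy VIP path end to end (a sketch; /version is served to unauthenticated clients by Kubernetes' default RBAC bindings, so a JSON version blob indicates the whole chain works):

curl -k https://10.0.0.80:6443/version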

Milestone 16: Deploy kube-scheduler — create its config file (all master nodes)

cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOF
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --bind-address=0.0.0.0 --master=127.0.0.1:8080 --leader-elect"
EOF

Create the kube-scheduler systemd unit (all masters)

cat >/usr/lib/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start the kube-scheduler service (all master nodes)

systemctl daemon-reload
systemctl start kube-scheduler.service
systemctl enable kube-scheduler.service

Check the master component status (any master)

[root@master3 ssl]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
# controller-manager is Unhealthy because it has not been installed yet
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Healthy     ok
etcd-2               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}

Milestone 17: Deploy kube-controller-manager — create its config file (all master nodes)

cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=0.0.0.0 \
--service-cluster-ip-range=192.168.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
EOF

Parameter notes:
--master=127.0.0.1:8080      address of the local apiserver
--leader-elect               leader election: one instance becomes the leader, the others block
--service-cluster-ip-range   Service IP range; the same segment placed in the certificates signed earlier (192.168.0.0/24 in this lab)

Create the kube-controller-manager systemd unit (all master nodes)

cat >/usr/lib/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start the kube-controller-manager service (all master nodes)

systemctl daemon-reload
systemctl start kube-controller-manager.service
systemctl enable kube-controller-manager.service
systemctl status kube-controller-manager.service

Check the master component status

Only proceed to the Node components once every master component reports healthy.

[root@master1 ssl]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}

Milestone 18: Deploy the Node components — kubelet; copy the kubelet and kube-proxy binaries from a master to each Node

Copy to node1:
scp /soft/kubernetes/server/bin/kubelet 10.0.0.73:/usr/local/bin/
scp /soft/kubernetes/server/bin/kube-proxy 10.0.0.73:/usr/local/bin/
Copy to node2:
scp /soft/kubernetes/server/bin/kubelet 10.0.0.74:/usr/local/bin/
scp /soft/kubernetes/server/bin/kube-proxy 10.0.0.74:/usr/local/bin/

Create the kubelet bootstrap.kubeconfig file

Master1 node

mkdir /root/config ; cd /root/config
cat >/root/config/environment.sh<<EOF
# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
# The apiserver endpoint is the VIP
KUBE_APISERVER="https://10.0.0.80:6443"
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=\${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Running this script produces the bootstrap.kubeconfig file.
EOF

Run the script:
[root@master1 config]# pwd
/root/config
sh /root/config/environment.sh

Create the kube-proxy kubeconfig file (master-1)

cat >/root/config/env_proxy.sh<<EOF
# Create the kube-proxy kubeconfig file
BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
# Again, the endpoint is the VIP
KUBE_APISERVER="https://10.0.0.80:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
EOF

Run the script:
[root@master1 config]# pwd
/root/config
sh /root/config/env_proxy.sh

Copy the kubeconfig files and certificates to all Node machines

Copy the bootstrap.kubeconfig and kube-proxy.kubeconfig files to every Node.
First create the directories on the node machines:

mkdir -p /etc/kubernetes/{cfg,ssl}

Copy the ssl certificate files (from master1)

scp /etc/kubernetes/ssl/* 10.0.0.73:/etc/kubernetes/ssl/
scp /etc/kubernetes/ssl/* 10.0.0.74:/etc/kubernetes/ssl/

Copy the kubeconfig files (from master1)

Copy to node1:
scp /root/config/bootstrap.kubeconfig 10.0.0.73:/etc/kubernetes/cfg
scp /root/config/kube-proxy.kubeconfig 10.0.0.73:/etc/kubernetes/cfg
Copy to node2:
scp /root/config/bootstrap.kubeconfig 10.0.0.74:/etc/kubernetes/cfg
scp /root/config/kube-proxy.kubeconfig 10.0.0.74:/etc/kubernetes/cfg

Create the kubelet parameter config file

Adjust the address per Node (run on the node machines).

node1:
cat >/etc/kubernetes/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 10.0.0.73
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["192.168.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF
node2:
cat >/etc/kubernetes/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 10.0.0.74
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["192.168.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

Create the kubelet options file

Adjust the IP address per Node.

node1:
cat >/etc/kubernetes/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=10.0.0.73 \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--config=/etc/kubernetes/cfg/kubelet.config \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=docker.io/kubernetes/pause:latest"
EOF
node2:
cat >/etc/kubernetes/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=10.0.0.74 \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--config=/etc/kubernetes/cfg/kubelet.config \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=docker.io/kubernetes/pause:latest"
EOF

Create the kubelet systemd unit (all node machines)

cat >/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

Bind the kubelet-bootstrap user to the system cluster role

Run on master1

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Start the kubelet service (all node machines)

systemctl daemon-reload
systemctl start kubelet.service
systemctl enable kubelet.service
systemctl status kubelet.service

View and approve CSR requests on the server side

Run on master1

[root@master1 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-I81WpTLkI1GcJL1RN_7AsH2gDtqkRuIGb9Cvkzktg00   23s   kubelet-bootstrap   Pending
node-csr-vaqrhpHnGVhoa6lFp3ADkZxKtcZLYFEbuXbJ1r9AtrM   13s   kubelet-bootstrap   Pending

Approve the requests
Run on a master:

kubectl certificate approve node-csr-I81WpTLkI1GcJL1RN_7AsH2gDtqkRuIGb9Cvkzktg00
kubectl certificate approve node-csr-vaqrhpHnGVhoa6lFp3ADkZxKtcZLYFEbuXbJ1r9AtrM

Check node status

[root@master1 ~]# kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
10.0.0.73   Ready    <none>   5s    v1.15.1
10.0.0.74   Ready    <none>   32m   v1.15.1

Deploy the kube-proxy component

kube-proxy runs on every Node; it watches the apiserver for Service and Endpoint changes and programs routing rules to load-balance traffic to services.

Create the kube-proxy config file

Note: set hostname-override to each node's own address.

node1:
cat >/etc/kubernetes/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--metrics-bind-address=0.0.0.0 \
--hostname-override=10.0.0.73 \
--cluster-cidr=192.168.0.0/24 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

Parameter notes:
--hostname-override=10.0.0.73   the node's own IP address
--cluster-cidr=192.168.0.0/24   the same segment used when the certificates were signed earlier
node2:
cat >/etc/kubernetes/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--metrics-bind-address=0.0.0.0 \
--hostname-override=10.0.0.74 \
--cluster-cidr=192.168.0.0/24 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

Create the kube-proxy systemd unit (all node machines)

cat >/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start the kube-proxy service

systemctl daemon-reload
systemctl start kube-proxy.service
systemctl enable kube-proxy.service
systemctl status kube-proxy.service

Run a demo project to verify (any master)

[root@master1 ~]# kubectl run nginx --image=nginx --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@master1 ~]# kubectl get pods -A
NAMESPACE   NAME                     READY   STATUS              RESTARTS   AGE
default     nginx-7bb7cd8db5-8b5gs   0/1     ContainerCreating   0          7s
default     nginx-7bb7cd8db5-dzc5q   0/1     ContainerCreating   0          7s

# Get pod IPs and the nodes they run on
[root@master1 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
nginx-7bb7cd8db5-8b5gs   1/1     Running   0          56s   172.17.1.2    10.0.0.74   <none>           <none>
nginx-7bb7cd8db5-dzc5q   1/1     Running   0          56s   172.17.46.2   10.0.0.73   <none>           <none>

# Expose the deployment through a NodePort service
[root@master1 ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort

# Check the service
[root@master1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   192.168.0.1     <none>        443/TCP        125m
nginx        NodePort    192.168.0.147   <none>        88:39073/TCP   11s

# Access the pods through the node port
[root@master1 ~]# curl http://10.0.0.73:39073
[root@master1 ~]# curl http://10.0.0.74:39073

Delete the demo project

kubectl delete deployment nginx    # deleting the deployment also removes its pods
kubectl delete svc -l run=nginx

Milestone 19: Deploy DNS

First copy the image archive to the node machines, then import it:

docker load -i coredns1.0.6.tar

Apply the yml file on master1

kubectl apply -f coredns-1.0.6.yml
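A quick check that DNS works (a sketch; it assumes the coredns yml uses the 192.168.0.2 clusterIP configured for the kubelet above, and uses busybox:1.28 because nslookup is broken in newer busybox images):

kubectl get pods -n kube-system -o wide
kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28 -- nslookup kubernetes.default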

Milestone 20: Deploy the dashboard

After downloading the yml file, modify the Service section as sketched below (the original screenshot of the change was lost).
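A sketch of the modification, inferred from the access URL below (the nodePort value 50000 sits inside the --service-node-port-range configured earlier; the exact file layout depends on the dashboard yml you downloaded):

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 50000
  selector:
    k8s-app: kubernetes-dashboard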


Access it at https://<node ip>:50000.

Create a user and grant permissions

Run on any master node

kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

Get the token

kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')

Milestone 21: Deploy Traefik 2.0 — create traefik-crd.yaml

CRDs are cluster-scoped resources (not namespaced).

mkdir /root/ingress && cd /root/ingress
[root@master1 ingress]# cat traefik-crd.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutes.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRoute
    plural: ingressroutes
    singular: ingressroute
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutetcps.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRouteTCP
    plural: ingressroutetcps
    singular: ingressroutetcp
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: middlewares.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: Middleware
    plural: middlewares
    singular: middleware
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: tlsoptions.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: TLSOption
    plural: tlsoptions
    singular: tlsoption

Create it:
kubectl apply -f traefik-crd.yaml

Check

[root@master1 ingress]# kubectl get crd
NAME                                   CREATED AT
ingressroutes.traefik.containo.us      2023-03-11T01:29:34Z
ingressroutetcps.traefik.containo.us   2023-03-11T01:29:34Z
middlewares.traefik.containo.us        2023-03-11T01:29:34Z
tlsoptions.traefik.containo.us         2023-03-11T01:29:34Z

Create the Traefik RBAC file

The ServiceAccount is namespaced (kube-system here); the ClusterRole and ClusterRoleBinding are cluster-scoped.

[root@master1 ingress]# cat traefik-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: kube-system
  name: traefik-ingress-controller
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups: [""]
    resources: ["services","endpoints","secrets"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses/status"]
    verbs: ["update"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["middlewares"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["ingressroutes"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["ingressroutetcps"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["tlsoptions"]
    verbs: ["get","list","watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system

kubectl apply -f traefik-rbac.yaml

Create the Traefik ConfigMap

[root@master1 ingress]# cat traefik-config.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: traefik-config
  namespace: kube-system
data:
  traefik.yaml: |-
    serversTransport:
      insecureSkipVerify: true
    api:
      insecure: true
      dashboard: true
      debug: true
    metrics:
      prometheus: ""
    entryPoints:
      web:
        address: ":80"
      websecure:
        address: ":443"
    providers:
      kubernetesCRD: ""
    log:
      filePath: ""
      level: error
      format: json
    accessLog:
      filePath: ""
      format: json
      bufferingSize: 0
      filters:
        retryAttempts: true
        minDuration: 20
      fields:
        defaultMode: keep
        names:
          ClientUsername: drop
        headers:
          defaultMode: keep
          names:
            User-Agent: redact
            Authorization: drop
            Content-Type: keep

Create it:
kubectl apply -f traefik-config.yaml

Check:
[root@master1 ingress]# kubectl get configmap -n kube-system
NAME                                 DATA   AGE
coredns                              1      14h
extension-apiserver-authentication   1      15h
kubernetes-dashboard-settings        1      13h
traefik-config                       1      4s

Label the target node

Traefik is deployed as a Kubernetes DaemonSet, so label the target nodes in advance; when it is deployed, the Pods are scheduled only onto the labeled nodes.

kubectl label nodes 10.0.0.73 IngressProxy=true

Verify the label was applied

[root@master1 ingress]# kubectl get nodes --show-labels
NAME        STATUS   ROLES    AGE   VERSION   LABELS
10.0.0.73   Ready    <none>   13h   v1.15.1   IngressProxy=true,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=10.0.0.73,kubernetes.io/os=linux
10.0.0.74   Ready    <none>   13h   v1.15.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=10.0.0.74,kubernetes.io/os=linux

Note: ports 80 and 443 must be free on every labeled Node.

netstat -antupl | grep -E "80|443"

Create the traefik deployment file

Because of the node label above, the Pods land only on the designated nodes, even though a DaemonSet is used.

[root@master1 ingress]# cat traefik-deploy.yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: kube-system
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
        - image: traefik:2.0.5
          name: traefik-ingress-lb
          ports:
            - name: web
              containerPort: 80
              hostPort: 80
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
          resources:
            limits:
              cpu: 2000m
              memory: 1024Mi
            requests:
              cpu: 1000m
              memory: 1024Mi
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          args:
            - --configfile=/config/traefik.yaml
          volumeMounts:
            - mountPath: "/config"
              name: "config"
      volumes:
        - name: config
          configMap:
            name: traefik-config
      tolerations:
        - operator: "Exists"
      nodeSelector:
        IngressProxy: "true"

Create it:
kubectl apply -f traefik-deploy.yaml

Check the running state

[root@master1 ingress]# kubectl get DaemonSet -A
NAMESPACE     NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR       AGE
kube-system   traefik-ingress-controller   1         1         0       1            0           IngressProxy=true   18s

Traefik route configuration

vim traefik-dashboard-route.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard-route
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`ingress.abcd.com`)
      kind: Rule
      services:
        - name: traefik
          port: 8080

Create it:
kubectl apply -f traefik-dashboard-route.yaml

Check:
[root@master1 ingress]# kubectl get ingressroute.traefik.containo.us -A
NAMESPACE     NAME                      AGE
kube-system   traefik-dashboard-route   78s

Access the Traefik Dashboard from a client

Bind the name in the client machine's hosts file (or create a DNS record):
/etc/hosts
10.0.0.73 ingress.abcd.com
Then open http://ingress.abcd.com in a browser to reach the web UI.
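Alternatively, verify from the command line without touching hosts files (a sketch; the Host header must match the IngressRoute rule above, and /dashboard/ is where Traefik 2.0 serves its UI):

curl -s -H "Host: ingress.abcd.com" http://10.0.0.73/dashboard/ | head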