部署kube-proxy
集群规划
主机名 角色 ip
hdss-1-21.host.com kube-proxy 192.168.1.21
hdss-1-22.host.com kube-proxy 192.168.1.22
签发生成证书签名请求(CSR)的JSON配置文件
(vi /opt/certs/kube-proxy-csr.json)
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
生成证书
[root@hdss-1-200 certs]# vim kube-proxy-csr.json
[root@hdss-1-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
2021/02/11 12:13:09 [INFO] generate received request
2021/02/11 12:13:09 [INFO] received CSR
2021/02/11 12:13:09 [INFO] generating key: rsa-2048
2021/02/11 12:13:09 [INFO] encoded CSR
2021/02/11 12:13:09 [INFO] signed certificate with serial number 519193978325678435097579671289246901920040072291
2021/02/11 12:13:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
分发证书,将证书拷贝到21节点上,22节点类似,注意私钥文件属性600
[root@hdss-1-21 cert]# pwd
/opt/kubernetes/server/bin/cert
[root@hdss-1-21 cert]# scp hdss-1-200:/opt/certs/kube-proxy-client-key.pem .
root@hdss-1-200's password:
kube-proxy-client-key.pem 100% 1679 1.3MB/s 00:00
[root@hdss-1-21 cert]# scp hdss-1-200:/opt/certs/kube-proxy-client.pem .
root@hdss-1-200's password:
kube-proxy-client.pem 100% 1375 878.3KB/s 00:00
[root@hdss-1-21 cert]#
证书分发完成后,在 conf 文件夹下创建 kube-proxy 的 kubeconfig 配置(四步走)
[root@hdss-1-21 bin]# cd conf/
[root@hdss-1-21 conf]# ls
audit.yaml k8s-node.yaml kubelet.kubeconfig
[root@hdss-1-21 conf]# kubectl config set-cluster myk8s \
> --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
> --embed-certs=true \
> --server=https://192.168.1.10:7443 \
> --kubeconfig=kube-proxy.kubeconfig
Cluster "myk8s" set.
[root@hdss-1-21 conf]# kubectl config set-credentials kube-proxy \
> --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
> --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
> --embed-certs=true \
> --kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.
[root@hdss-1-21 conf]# kubectl config set-context myk8s-context \
> --cluster=myk8s \
> --user=kube-proxy \
> --kubeconfig=kube-proxy.kubeconfig
Context "myk8s-context" created.
[root@hdss-1-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
Switched to context "myk8s-context".
[root@hdss-1-21 conf]#
第一台node节点部署完成后,将生成的配置文件(kube-proxy.kubeconfig)拷贝至另外一个Node节点
[root@hdss-1-22 cert]# cd /opt/kubernetes/server/bin/conf
[root@hdss-1-22 conf]# scp hdss-1-21:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig .
root@hdss-1-21's password:
kube-proxy.kubeconfig 100% 6222 3.1MB/s 00:00
[root@hdss-1-22 conf]#
加载ipvs
[root@hdss-1-21 ~]# lsmod|grep ip_vs
[root@hdss-1-21 ~]# vi ipvs.sh
#!/bin/bash
# Load every IPVS kernel module shipped with the running kernel so that
# kube-proxy can run with --proxy-mode=ipvs.
#
# Fixes over the original version:
#   - iterates a quoted glob instead of parsing `ls | grep` output
#   - quotes every variable expansion
#   - skips cleanly when the module directory does not exist
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"

for mod_path in "$ipvs_mods_dir"/*; do
  # Unmatched glob (directory missing/empty) leaves the literal pattern behind.
  [ -e "$mod_path" ] || continue
  mod=$(basename "$mod_path")
  mod=${mod%%.*}   # strip .ko / .ko.xz suffix to get the module name
  # Only modprobe modules that modinfo can actually resolve.
  if /sbin/modinfo -F filename "$mod" &>/dev/null; then
    /sbin/modprobe "$mod"
  fi
done
赋权启动
[root@hdss-1-21 ~]# chmod u+x ipvs.sh
[root@hdss-1-21 ~]# ./ipvs.sh
[root@hdss-1-21 ~]# lsmod|grep ip_vs
ip_vs_wrr 12697 0
ip_vs_wlc 12519 0
ip_vs_sh 12688 0
ip_vs_sed 12519 0
ip_vs_rr 12600 0
ip_vs_pe_sip 12740 0
nf_conntrack_sip 33780 1 ip_vs_pe_sip
ip_vs_nq 12516 0
ip_vs_lc 12516 0
ip_vs_lblcr 12922 0
ip_vs_lblc 12819 0
ip_vs_ftp 13079 0
ip_vs_dh 12688 0
ip_vs 145458 25 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat 26583 3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack 139264 8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
[root@hdss-1-21 ~]#
创建启动脚本
[root@hdss-1-21 ~]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
# Supervisord-managed launcher for kube-proxy (IPVS mode).
#
# `exec` replaces this wrapper shell with the kube-proxy process, so the
# stopsignal configured in supervisord (QUIT) is delivered to kube-proxy
# itself rather than to the sh wrapper. Without exec, "supervisorctl stop"
# would kill only the wrapper and orphan kube-proxy.
#
# Flags:
#   --cluster-cidr       pod network range used to detect cluster traffic
#   --hostname-override  must match this node's registered Node name
#   --proxy-mode=ipvs    requires the ip_vs modules loaded via ipvs.sh
#   --ipvs-scheduler=nq  "never queue" scheduling algorithm
#   --kubeconfig         client credentials generated in the four-step setup
exec ./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override hdss-1-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig
创建配置目录和赋予执行权限
[root@hdss-1-21 ~]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@hdss-1-21 ~]# mkdir -p /data/logs/kubernetes/kube-proxy
[root@hdss-1-21 ~]#
编辑后台启动文件
vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-1-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
启动
[root@hdss-1-21 ~]# supervisorctl update
kube-proxy-1-21: added process group
[root@hdss-1-21 ~]# supervisorctl status
etcd-server-1-21 RUNNING pid 18457, uptime 0:38:37
kube-apiserver-1-21 RUNNING pid 18451, uptime 0:38:37
kube-controller-manager-1-22 RUNNING pid 18452, uptime 0:38:37
kube-kubelet-1-21 RUNNING pid 18455, uptime 0:38:37
kube-proxy-1-21 RUNNING pid 26432, uptime 0:00:54
kube-scheduler-1-21 RUNNING pid 18453, uptime 0:38:37
查看端口
[root@hdss-1-21 ~]# netstat -luntp |grep kube-proxy
tcp 0 0 127.0.0.1:10249 0.0.0.0:* LISTEN 26433/./kube-proxy
tcp6 0 0 :::10256 :::* LISTEN 26433/./kube-proxy
[root@hdss-1-21 ~]#
查看ipvs是否生效
yum install -y ipvsadm # 只安装,不启动
[root@hdss-1-21 bin]# yum install -y ipvsadm
[root@hdss-1-21 bin]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.254.0.1:443 nq
-> 192.168.1.21:6443 Masq 1 0 0
-> 192.168.1.22:6443 Masq 1 0 0
[root@hdss-1-21 bin]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 6h53m
[root@hdss-1-21 bin]#
查看集群状况
[root@hdss-1-21 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
etcd-0 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
scheduler Healthy ok
controller-manager Healthy ok
[root@hdss-1-21 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss-1-21.host.com Ready master,node 60m v1.15.2
hdss-1-22.host.com Ready master,node 61m v1.15.2