
One-Command Deployment of a Highly Available Kubernetes Cluster



The topology uses three master nodes, two worker nodes, two nodes for load balancing, and one virtual IP address. The virtual IP address in this example is also called a floating IP address:
even if a node fails, the IP address can be moved between nodes, so failover happens and high availability is preserved (a failover test is shown at the end of section 2).


Due to resource constraints, the servers are reused (the load balancers run on the worker nodes), as follows:

Hostname        IP               Roles
k8s-master01    192.168.102.71   master, etcd
k8s-master02    192.168.102.72   master, etcd
k8s-master03    192.168.102.73   master, etcd
k8s-node01      192.168.102.74   worker, haproxy + keepalived
k8s-node02      192.168.102.75   worker, haproxy + keepalived
VIP             192.168.102.77   floating IP for the apiserver


1. Initialization


Perform the following on all machines:


Set the hostnames

hostnamectl set-hostname k8s-master01   # on 192.168.102.71
hostnamectl set-hostname k8s-master02   # on 192.168.102.72
hostnamectl set-hostname k8s-master03   # on 192.168.102.73
hostnamectl set-hostname k8s-node01     # on 192.168.102.74
hostnamectl set-hostname k8s-node02     # on 192.168.102.75
[root@k8s-master01 ~]# MasterNodes='k8s-master01 k8s-master02 k8s-master03'
[root@k8s-master01 ~]# WorkNodes='k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for NODE in $MasterNodes; do ssh-copy-id $NODE; done
[root@k8s-master01 ~]# for NODE in $WorkNodes; do ssh-copy-id $NODE; done
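Note that ssh-copy-id assumes an SSH key pair already exists on k8s-master01; if one has not been generated yet, create it first (a minimal sketch, using the default key path):

[root@k8s-master01 ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa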

Run the initialization script

[root@k8s-master01 ~]# vim init.sh
#!/bin/sh
echo "192.168.102.71 k8s-master01" >> /etc/hosts
echo "192.168.102.72 k8s-master02" >> /etc/hosts
echo "192.168.102.73 k8s-master03" >> /etc/hosts
echo "192.168.102.74 k8s-node01" >> /etc/hosts
echo "192.168.102.75 k8s-node02" >> /etc/hosts
systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
sed -ri 's/.*swap.*/#&/' /etc/fstab
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2 epel-release
yum install -y ipvsadm ipset sysstat conntrack libseccomp socat git ebtables
yum install -y ntp
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
echo "*/1 * * * * root ntpdate time2.aliyun.com" >> /etc/crontab   # /etc/crontab entries need a user field
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6
mkdir -p /opt/docker /etc/docker
cat > /etc/docker/daemon.json <<-EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://7uuu3esz.mirror.aliyuncs.com","https://moefhjht.mirror.aliyuncs.com"],
  "data-root": "/opt/docker"
}
EOF
systemctl daemon-reload && systemctl enable --now docker
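After the script runs, it is worth confirming that Docker actually picked up daemon.json; a quick check (the expected values below assume the configuration above):

[root@k8s-master01 ~]# docker info | grep -E 'Cgroup Driver|Docker Root Dir'
 Cgroup Driver: systemd
 Docker Root Dir: /opt/docker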

[root@k8s-master01 ~]# Nodes='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do scp init.sh $node:/root/; done
[root@k8s-master01 ~]# Nodes='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do ssh $node 'sh /root/init.sh'; done

Upgrade the kernel on all nodes

[root@k8s-master01 ~]# vim kernel.sh
#!/bin/sh
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel -y
[root@k8s-master01 ~]# Nodes='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do scp kernel.sh $node:/root/; done
[root@k8s-master01 ~]# Nodes='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do ssh $node 'sh /root/kernel.sh' ;done
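kernel.sh only installs the new kernel; it does not boot into it. On CentOS 7 you typically still need to point GRUB at the new entry and reboot each node (a sketch, assuming the freshly installed kernel-ml is menu entry 0, which is the usual case):

grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
# After the reboot, confirm the running kernel:
uname -r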

2. Install keepalived and haproxy on the Node machines for high availability


Run on k8s-node01 (192.168.102.74):

[root@k8s-node01 ~]# yum install keepalived haproxy psmisc -y
[root@k8s-node01 ~]# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log  local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 192.168.102.71:6443 check # Replace the IP address
    server kube-apiserver-2 192.168.102.72:6443 check # Replace the IP address
    server kube-apiserver-3 192.168.102.73:6443 check # Replace the IP address

[root@k8s-node01 ~]# systemctl restart haproxy
[root@k8s-node01 ~]# systemctl enable haproxy
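Before moving on to keepalived, you can verify that haproxy is actually listening on the apiserver port (a quick sanity check):

[root@k8s-node01 ~]# ss -lntp | grep -w 6443   # should show haproxy in LISTEN state on *:6443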
# Configure keepalived
[root@k8s-node01 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
    }
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight -30
}

vrrp_instance haproxy-vip {
    state MASTER
    priority 100
    interface eth0                    # NIC device name
    virtual_router_id 60
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 192.168.102.74     # node01 IP
    unicast_peer {
        192.168.102.75                # node02 IP
    }
    virtual_ipaddress {
        192.168.102.77/24             # The VIP address
    }
    track_script {
        chk_haproxy
    }
}
[root@k8s-node01 ~]# systemctl restart keepalived
[root@k8s-node01 ~]# systemctl enable keepalived


Run on k8s-node02 (192.168.102.75):

[root@k8s-node02 ~]# yum install keepalived haproxy psmisc -y
[root@k8s-node02 ~]# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log  local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 192.168.102.71:6443 check # Replace the IP address
    server kube-apiserver-2 192.168.102.72:6443 check # Replace the IP address
    server kube-apiserver-3 192.168.102.73:6443 check # Replace the IP address

[root@k8s-node02 ~]# systemctl restart haproxy
[root@k8s-node02 ~]# systemctl enable haproxy
[root@k8s-node02 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
    }
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight -30
}

vrrp_instance haproxy-vip {
    state BACKUP
    priority 90
    interface eth0                    # NIC device name
    virtual_router_id 60
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 192.168.102.75     # node02 IP
    unicast_peer {
        192.168.102.74                # node01 IP
    }
    virtual_ipaddress {
        192.168.102.77/24             # The VIP address
    }
    track_script {
        chk_haproxy
    }
}
[root@k8s-node02 ~]# systemctl restart keepalived
[root@k8s-node02 ~]# systemctl enable keepalived


Check that the VIP has come up

[root@k8s-node01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:bb:85:de brd ff:ff:ff:ff:ff:ff
    inet 192.168.102.74/24 brd 192.168.102.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 192.168.102.77/24 scope global secondary eth0
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:f4:99:5a:28 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
[root@k8s-node01 ~]# ping 192.168.102.77
PING 192.168.102.77 (192.168.102.77) 56(84) bytes of data.
64 bytes from 192.168.102.77: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 192.168.102.77: icmp_seq=2 ttl=64 time=0.043 ms
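To exercise the failover described at the start, stop haproxy on k8s-node01 and watch the VIP move: the chk_haproxy track script ("killall -0 haproxy") starts failing, keepalived lowers node01's priority by 30 (100 - 30 = 70, below node02's 90), and node02 takes over the VIP. A sketch of the test:

[root@k8s-node01 ~]# systemctl stop haproxy
[root@k8s-node02 ~]# ip a show eth0 | grep 192.168.102.77   # the VIP should now appear on node02
[root@k8s-node01 ~]# systemctl start haproxy                # node01 regains priority 100 and the VIP fails back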


3. Run the one-command deployment on k8s-master01


KubeKey (written in Go) is a new installation tool that replaces the earlier Ansible-based installer. KubeKey offers flexible installation options and lets you install a Kubernetes cluster with a single command. (Under the hood it is based on kubeadm.)


Download and configure the one-command installation tool

[root@k8s-master01 ~]# export KKZONE=cn
[root@k8s-master01 ~]# curl -sfL https://get-kk.kubesphere.io | sh -
# Generate the configuration manifest for the cluster installation
[root@k8s-master01 ~]# ./kk create config --with-kubernetes v1.21.5 -f k8s-cluster.yaml
# Edit as follows
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: k8s-master01, address: 192.168.102.71, internalAddress: 192.168.102.71, user: root, password: "123.com"}
  - {name: k8s-master02, address: 192.168.102.72, internalAddress: 192.168.102.72, user: root, password: "123.com"}
  - {name: k8s-master03, address: 192.168.102.73, internalAddress: 192.168.102.73, user: root, password: "123.com"}
  - {name: k8s-node01, address: 192.168.102.74, internalAddress: 192.168.102.74, user: root, password: "123.com"}
  - {name: k8s-node02, address: 192.168.102.75, internalAddress: 192.168.102.75, user: root, password: "123.com"}
  roleGroups:
    etcd:
    - k8s-master01
    - k8s-master02
    - k8s-master03
    control-plane:
    - k8s-master01
    - k8s-master02
    - k8s-master03
    worker:
    - k8s-node01
    - k8s-node02
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: "192.168.102.77"   # Add your VIP here
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []

[root@k8s-master01 ~]# ./kk create cluster -f k8s-cluster.yaml
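kk first runs a dependency pre-check on every host and asks for confirmation before it starts installing; if you want a non-interactive run, piping yes to it is one option (a sketch):

[root@k8s-master01 ~]# yes | ./kk create cluster -f k8s-cluster.yaml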

4. Check the cluster status after installation


[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES                  AGE   VERSION
k8s-master01   Ready    control-plane,master   22m   v1.21.5
k8s-master02   Ready    control-plane,master   21m   v1.21.5
k8s-master03   Ready    control-plane,master   21m   v1.21.5
k8s-node01     Ready    worker                 21m   v1.21.5
k8s-node02     Ready    worker                 21m   v1.21.5
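To confirm that the control plane is really replicated, you can also check that one kube-apiserver pod is running per master (a sketch; in kubeadm-based installs the static pod names end with the node name):

[root@k8s-master01 ~]# kubectl -n kube-system get pod -o wide | grep kube-apiserver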

[root@k8s-master01 .kube]# cat /root/.kube/config
apiVersion: v1
clusters:
- cluster:
    ............
    server: https://lb.kubesphere.local:6443   # This address is mapped in /etc/hosts to our VIP
  name: cluster.local
[root@k8s-master01 .kube]# cat /etc/hosts
.......
192.168.102.77 lb.kubesphere.local
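Because lb.kubesphere.local resolves to the VIP, all API traffic flows through haproxy. As a final end-to-end check you can query the apiserver through the VIP (on a default kubeadm configuration the /version endpoint is readable anonymously):

[root@k8s-master01 ~]# curl -k https://lb.kubesphere.local:6443/version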

