
K8s Setup 01: Building the K8s Cluster

※1. Base machine kernel check and upgrade
Check the kernel version: uname -r

#Upload the kernel-4.9.220-37.el7.x86_64.rpm update package and run:

  rpm -Uvh kernel-4.9.220-37.el7.x86_64.rpm
  grub2-set-default "CentOS Linux (4.9.220-37.el7.x86_64) 7 (Core)"
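
Optional sanity check before rebooting (a sketch assuming a BIOS machine with classic GRUB2 menu entries, i.e. no BLS):

  grub2-editenv list                                        # saved_entry should show the 4.9 kernel
  awk -F\' '/^menuentry /{print $2}' /boot/grub2/grub.cfg   # list the exact menu entry titles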

#After the installation completes, reboot the machine

#After the reboot, remove the old kernel (the following is an example; use the versions actually reported)

rpm -qa | grep kernel
yum remove kernel-3.10.0-1160* -y

※2. Check whether Docker or Kubernetes services were installed before, and uninstall them

# 2-1. Reset any existing kubeadm state
kubeadm reset

# 2-2. Remove the rpm packages
rpm -qa | grep kube | xargs rpm --nodeps -e

# 2-3. Remove containers and images
docker images -qa | xargs docker rmi -f

#2-4. Remove Docker
sudo systemctl stop docker
yum list installed | grep docker
yum -y remove docker*
rm -rf /var/lib/docker

※3. Install Docker (v18.09.7)

#3-1. Log in to CentOS as root and make sure the yum packages are up to date
sudo yum update
#3-2. Remove old versions (if any were previously installed)
sudo yum remove docker  docker-common docker-selinux docker-engine
#3-3. Install the required packages: yum-utils provides yum-config-manager; the other two are dependencies of the devicemapper storage driver
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
#3-4. Configure the yum repository
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#3-5. List every Docker version available in the repositories and pick a specific one to install
yum list docker-ce --showduplicates | sort -r
#3-6. Install Docker
yum -y install docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io 
#3-7. Start Docker and enable it at boot
sudo systemctl start docker
sudo systemctl enable docker
#3-8. Verify the installation (output containing both Client and Server sections means Docker installed and started successfully)
docker version
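
An optional end-to-end check (requires Internet access; pulls the tiny hello-world image):

docker run --rm hello-world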

※4. Configure a Docker registry mirror

#4-1. Configure the registry mirror and switch the Docker cgroup driver to systemd

vim /etc/docker/daemon.json

{
        "exec-opts": ["native.cgroupdriver=systemd"],
        "registry-mirrors": ["http://f1361db2.m.daocloud.io"],
        "log-driver": "json-file",
        "log-opts": {
                "max-size": "10m",
                "max-file": "5"
        },
        "max-concurrent-downloads": 10,
        "max-concurrent-uploads": 10
}

#4-2. Reload the configuration:
systemctl daemon-reload

#4-3. Enable Docker at boot:
systemctl enable docker

#4-4. Restart Docker:
systemctl restart docker
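
Verify the cgroup driver actually changed (should print "systemd"):

docker info --format '{{.CgroupDriver}}'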

※5. Base machine environment setup (run on every machine in the inventory)

Hostname	IP
k8s-master01	192.168.168.194
k8s-master02	192.168.168.195
k8s-master03	192.168.168.196


#5-1. Set the hostname and refresh the shell (replace the suffix with this machine's name from the table above)
hostnamectl set-hostname k8s-master***  && bash

#5-2. Note: add the following hosts entries on every machine in the k8s cluster (adjust to your environment and double-check the hostname/IP pairs)
cat <<EOF >> /etc/hosts
192.168.168.194 k8s-master01
192.168.168.195 k8s-master02
192.168.168.196 k8s-master03
EOF

#5-3. Set up passwordless SSH from the master01 node. Do not copy the key material below verbatim (acceptable on an isolated, offline network; skip this step in production)
ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.168.195
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.168.196

#5-4. Time synchronization: configure NTP according to how the machines are used
(omitted)

#5-5. Disable SELinux now and at boot
setenforce 0  && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

#5-6. Disable the swap partition
swapoff -a && sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
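
Verify swap is fully off (free should report 0 and swapon should print nothing):

free -h | grep -i swap
swapon -s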
             
#5-7. Set kernel parameters; net.bridge.bridge-nf-call-iptables passes bridged IPv4 traffic to the iptables chains (check for duplicate entries and treat this copy of the configuration as authoritative)
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
fs.file-max = 1000000
net.ipv4.neigh.default.gc_thresh1 = 1024
net.ipv4.neigh.default.gc_thresh2 = 4096
net.ipv4.neigh.default.gc_thresh3 = 8192
net.netfilter.nf_conntrack_max = 10485760
net.netfilter.nf_conntrack_tcp_timeout_established = 300
net.netfilter.nf_conntrack_buckets = 655360
net.core.netdev_max_backlog = 10000
fs.inotify.max_user_instances = 524288
fs.inotify.max_user_watches = 524288
vm.swappiness = 0
vm.max_map_count = 300000

#5-8. Apply the system parameters
sysctl --system

#5-9. Verify that the parameters took effect
lsmod | grep br_netfilter
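
If lsmod prints nothing, the br_netfilter module is not loaded and the bridge sysctls above cannot take effect; load it now and persist it across reboots (a modules-load.d file is the standard systemd mechanism):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system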

#5-10. Install the IPVS management tools
yum install ipvsadm ipset -y

#5-11. Add an IPVS module-loading script so the modules survive reboots (note: on kernels >= 4.19 nf_conntrack_ipv4 was merged into nf_conntrack; this guide's 4.9 kernel still uses nf_conntrack_ipv4)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

#5-12. Verify that the IPVS modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

※6. Install the k8s commands (install on every machine)

#6-1. Configure the Kubernetes yum repository (Aliyun mirror)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#6-2. Build the yum metadata cache
yum makecache

#6-3. Install kubelet, kubeadm, and kubectl
yum -y install kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2 --nogpgcheck  # (kubectl is optional)
rpm -qa kubelet kubectl kubeadm  # verify the installed packages
systemctl enable kubelet         # enable kubelet at boot

#6-4. Set an environment variable; 1.18.2 does not serve the API on port 8080 by default (masters only; run this on every machine planned as a master)
vim /etc/profile
export KUBERNETES_MASTER="127.0.0.1:8080"
#Save and exit, then refresh the environment
source /etc/profile

kubectl version  # verify that the installation succeeded

※7. High-availability services (masters only; skip this step for a single-node setup)

#7-1. Install haproxy and keepalived (run on every master)
yum -y install pcre-devel bzip2-devel keepalived haproxy

#7-2. Write the haproxy configuration file as below (every master gets the same file; change the host/IP details to match your machines)
cat <<EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:8443
    option               tcplog
    default_backend      kubernetes-apiserver
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server  k8s-master01 192.168.168.194:6443 check
    server  k8s-master02 192.168.168.195:6443 check
    server  k8s-master03 192.168.168.196:6443 check
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
-- copy everything above this line
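
Before restarting, haproxy can validate the file itself; this catches syntax errors without touching the running service:

haproxy -c -f /etc/haproxy/haproxy.cfg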

#7-3. Enable at boot and restart:
systemctl enable haproxy && systemctl restart haproxy

#7-4. Check the status:
systemctl status haproxy|grep Active
#If it failed, find the cause with: journalctl -f

#7-5-1. Write the keepalived configuration (primary master; adjust the IPs and other details to your environment)
cat << EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    router_id lb-master-104
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}

vrrp_instance VI-kube-master {
    state MASTER
    priority 120
    dont_track_primary
    interface ens192
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.168.240
    }
}
EOF
--- delimiter: copy and run everything above this line

#7-5-2. Configuration for the remaining masters (adjust the IPs and other details to your environment)
cat << EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    router_id lb-backup-105
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}

vrrp_instance VI-kube-master {
    state BACKUP
    priority 110
    dont_track_primary
    interface ens192
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.168.240
    }
}
EOF
-- delimiter: copy everything above this line

#7-6. Enable at boot and restart; run this on every machine where keepalived is installed
systemctl enable keepalived && systemctl restart keepalived

#7-7. Check the status:
systemctl status keepalived|grep Active
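
Verify which node currently holds the VIP (it should appear on exactly one master; the interface name matches the keepalived configuration above):

ip addr show ens192 | grep 192.168.168.240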

#7-8. If it failed, find the cause with:
journalctl -f

※8. Master & node image pull

#8-1. Generate the image pull script on the node machines (machines in mainland China should run the script below; overseas machines can docker pull the three images kube-proxy:v1.18.2, pause:3.2, coredns:1.6.7 directly)
###Note: this part is for the node machines
cat <<'EOF' >for.sh
#!/bin/bash
images=(kube-proxy:v1.18.2 pause:3.2 coredns:1.6.7)
for imageName in ${images[@]} ; do
docker pull registry.aliyuncs.com/google_containers/$imageName
docker tag registry.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
docker rmi registry.aliyuncs.com/google_containers/$imageName
done
EOF

#8-2. Run the image pull script
sh for.sh
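
Verify the three retagged images are present locally:

docker images | grep k8s.gcr.io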

#8-3. Whitelist an insecure (plain-HTTP) registry; Docker only trusts HTTPS registries by default. Append --insecure-registry to the existing ExecStart line, adjusting the address to your environment (the same can also be done with "insecure-registries" in /etc/docker/daemon.json)
vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --insecure-registry=192.168.168.241

#8-4. Reload the configuration
systemctl daemon-reload

#8-5. Restart Docker to apply the configuration
systemctl restart docker 

#8-6. Verify the login (this step assumes a Harbor registry is already installed)
docker login 192.168.168.241 -u k8s -p DDM54321ddm

#8-7. Master image pull (machines in mainland China should run the script below; overseas machines can docker pull kube-apiserver:v1.18.2 kube-controller-manager:v1.18.2 kube-scheduler:v1.18.2 kube-proxy:v1.18.2 pause:3.2 etcd:3.4.3-0 coredns:1.6.7 directly)
cat <<'EOF' >for.sh
#!/bin/bash
images=(kube-apiserver:v1.18.2 kube-controller-manager:v1.18.2 kube-scheduler:v1.18.2 kube-proxy:v1.18.2 pause:3.2 etcd:3.4.3-0 coredns:1.6.7)
for imageName in ${images[@]} ; do
docker pull registry.aliyuncs.com/google_containers/$imageName
docker tag registry.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
docker rmi registry.aliyuncs.com/google_containers/$imageName
done
EOF

#8-8. Run the image pull script
sh for.sh

#8-9. Initialize the first master. Edit the command below first: replace <VIP> with the keepalived virtual IP (192.168.168.240 in this setup)
kubeadm init --kubernetes-version=v1.18.2 --control-plane-endpoint "<VIP>:8443" --upload-certs  --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
#Follow the prompts in the init output to add the remaining nodes. Note: every master that is added must also run the three kubeconfig commands printed in the output (e.g. mkdir -p $HOME/.kube; see the sketch below)
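
For reference, these are the three standard commands kubeadm prints for setting up kubectl access:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config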

#8-10. Output like the following is the join command for masters
kubeadm join 192.168.168.240:8443 --token oposbr.5of0qbhayfeojvlv \
--discovery-token-ca-cert-hash sha256:f20863df8c02866b0d4e3241a0548c1aad675346c8105935729ddfeeb0e00c6d \
--control-plane --certificate-key e9d42910ab4178e8fa5db436f188de6aa07b01af4a0aab0bf997f43c77f267e9

#8-11. Output like the following is the join command for nodes
kubeadm join 192.168.168.240:8443 --token bwtfaa.lw8pu6uudpedkzwt \
--discovery-token-ca-cert-hash sha256:f20863df8c02866b0d4e3241a0548c1aad675346c8105935729ddfeeb0e00c6d
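
After joining, confirm every machine registered and the control-plane pods are running:

kubectl get nodes -o wide
kubectl get pods -n kube-system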

※9. kubelet and control-plane parameter tuning (run on every master)
###Note: this step should be performed alongside the separate document covering the per-machine limits

#9-1. Add tuning parameters for kubelet; see the reference link below for details
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
  #Original: ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
  #Modified (appends --image-pull-progress-deadline=10m --eviction-hard=nodefs.available<5% --serialize-image-pulls=false):
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS --image-pull-progress-deadline=10m --eviction-hard=nodefs.available<5% --serialize-image-pulls=false
#9-2. Tune the scheduler
vim /etc/kubernetes/manifests/kube-scheduler.yaml
  #Add the following under the command: section (any position, matching the format below)
- --kube-api-qps=100
#9-3. Tune the controller manager's API rate limits
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
  #Add the following under the command: section (any position, matching the format below)
    - --kube-api-qps=100
    - --kube-api-burst=100



#######After adding the parameters, save the configuration and restart the docker and kubelet services
systemctl daemon-reload && systemctl restart docker && systemctl restart kubelet
Reference link

http://newwiki.fixed-stars.com/pages/viewpage.action?pageId=6160513

※10. Network plugin installation

#10-1. Once the masters have joined, install the network plugin

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

#Edit the downloaded kube-flannel.yml and change the image to the internal mirror (skip this step if the machines have Internet access)

192.168.168.87/mirror/flannel:v0.14.0-rc1
#After editing the file, apply it to (re)load the network plugin
kubectl apply -f kube-flannel.yml
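
Verify that the flannel daemonset pods come up and the nodes turn Ready (the app=flannel label assumes the stock manifest):

kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes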

※11. Extend the master certificates' validity to 100 years (kubeadm's default is 1 year)

#11-1. Install Go; rebuilding kubeadm below requires the Go toolchain
wget https://golang.google.cn/dl/go1.14.4.linux-amd64.tar.gz
rm -rf /usr/local/go
tar -zxf go1.14.4.linux-amd64.tar.gz -C /usr/local
vim /etc/profile
export GOROOT=/usr/local/go 
export GOPATH=/data/gopath
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin
source /etc/profile
go version

#11-2. Install git if it is missing, then check out the matching source
yum install -y git
git clone https://github.com/kubernetes/kubernetes.git
cd kubernetes
git checkout v1.18.2
vim cmd/kubeadm/app/constants/constants.go  #find CertificateValidity and change it as follows
CertificateValidity = time.Hour * 24 * 365 * 100
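
A non-interactive alternative to the vim edit (a sketch assuming the stock v1.18.2 source line; the $ anchor keeps it from matching an already-patched line):

sed -i 's#CertificateValidity = time.Hour \* 24 \* 365$#CertificateValidity = time.Hour * 24 * 365 * 100#' cmd/kubeadm/app/constants/constants.go
grep -n 'CertificateValidity =' cmd/kubeadm/app/constants/constants.go   # confirm the change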

#11-3. Save and exit, then run the build
make WHAT=cmd/kubeadm
ll _output/bin/  #check that the binary was generated

#11-4. Make backups before replacing anything:
cp /usr/bin/kubeadm{,.bak20210513}
cp -r /etc/kubernetes/pki{,.bak20210513}
cp _output/bin/kubeadm /usr/bin/kubeadm   #confirm the overwrite manually
cd /etc/kubernetes/pki
kubeadm alpha certs renew all

#11-5. Verify the result
kubeadm alpha certs check-expiration

#11-6. Repeat the steps on the other masters, copying the rebuilt kubeadm to them first
scp /usr/bin/kubeadm 192.168.168.195:/tmp/
scp /usr/bin/kubeadm 192.168.168.196:/tmp/
cp /usr/bin/kubeadm{,.bak20210513}
cp -r /etc/kubernetes/pki{,.bak20210513}
cp /tmp/kubeadm /usr/bin/kubeadm   #on the other masters use the binary copied to /tmp; confirm the overwrite manually
cd /etc/kubernetes/pki
kubeadm alpha certs renew all

※12. k8s environment tuning
#12-1. Set the disk eviction threshold so the default 85% disk-usage limit does not stop the k8s cluster from running normally
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
#Append the following to the end of the Environment parameter line, then restart kubelet to apply
--eviction-hard=nodefs.available<1%  #parameter to append
systemctl restart kubelet

#12-2. Route k8s service traffic through IPVS

kubectl edit configmap kube-proxy -n kube-system
#Set mode: "ipvs" where mode: "" appears
    kind: KubeProxyConfiguration
    metricsBindAddress: ""
    mode: "ipvs"              #-- add here
    nodePortAddresses: null
    oomScoreAdj: null
    portRange: ""
    showHiddenMetricsForVersion: ""
    udpIdleTimeout: 0s
    winkernel:
      enableDSR: false

#12-3. Enable strict ARP mode
#Find the field strictARP: false and change it to true
strictARP: true

#12-4. Delete the kube-proxy pods, which is equivalent to restarting kube-proxy
kubectl get pod -A |grep kube-proxy|awk '{print $2}' |xargs -i kubectl delete pod {} -n kube-system --force
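
Confirm kube-proxy is now in IPVS mode (virtual servers show up in ipvsadm and the logs mention the ipvs proxier):

ipvsadm -Ln | head
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=20 | grep -i ipvs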

※13. etcd backup and restore

#13-1. Back up etcd

mkdir -p /data/etcd$(date +%Y%m%d)
cd /data/etcd$(date +%Y%m%d)
ETCDCTL_API=3 etcdctl --endpoints 192.168.168.194:2379 --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --cacert=/etc/kubernetes/pki/etcd/ca.crt  snapshot save snapshot11
ETCDCTL_API=3 etcdctl snapshot status snapshot11  #inspect the saved snapshot's metadata
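
Optional: a minimal daily-backup cron sketch (the schedule, target directory, and etcdctl path here are assumptions; note that % must be escaped as \% inside cron entries):

cat > /etc/cron.d/etcd-backup <<'EOF'
0 2 * * * root mkdir -p /data/etcd-backup && ETCDCTL_API=3 /usr/bin/etcdctl --endpoints 192.168.168.194:2379 --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --cacert=/etc/kubernetes/pki/etcd/ca.crt snapshot save /data/etcd-backup/snapshot-$(date +\%Y\%m\%d)
EOF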

#13-2. Restore the etcd database on a kubeadm deployment
#First stop the kube-apiserver and etcd containers
    mv /etc/kubernetes/manifests /etc/kubernetes/manifests.bak
    mv /var/lib/etcd/ /var/lib/etcd.bak
#13-3. Restore
ETCDCTL_API=3 etcdctl snapshot restore  /data/etcd20210520/snapshot11 --endpoints=192.168.168.194:2379  --name=k8s-master01 --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --cacert=/etc/kubernetes/pki/etcd/ca.crt  --initial-advertise-peer-urls=https://192.168.168.194:2380  --initial-cluster-token=etcd-cluster-0 --initial-cluster=k8s-master01=https://192.168.168.194:2380,k8s-master02=https://192.168.168.195:2380,k8s-master03=https://192.168.168.196:2380  --data-dir=/var/lib/etcd

##The restore above covers only a single machine; copy the backup to the other machines and restore with the same command, taking care to adjust the parameters (--name, IPs)
#13-4. Bring the kube-apiserver and etcd containers back up
#(the restore in 13-3 already recreated /var/lib/etcd, so only the manifests move back; keep /var/lib/etcd.bak until the cluster is verified, then remove it)
    mv /etc/kubernetes/manifests.bak /etc/kubernetes/manifests
    systemctl start docker



############etcd verification

#13-5. Check the etcd HA cluster's health

ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --write-out=table --endpoints=192.168.168.194:2379,192.168.168.195:2379,192.168.168.196:2379 endpoint health

#13-6. List the etcd HA cluster members

ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --write-out=table --endpoints=192.168.168.194:2379,192.168.168.195:2379,192.168.168.196:2379 member list

#13-7. Identify the etcd HA cluster leader

ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --write-out=table --endpoints=192.168.168.194:2379,192.168.168.195:2379,192.168.168.196:2379 endpoint status

#13-8. Verify data consistency (run on every etcd member and compare)

ETCDCTL_API=3 etcdctl --endpoints=192.168.168.194:2379,192.168.168.195:2379,192.168.168.196:2379 --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --cacert=/etc/kubernetes/pki/etcd/ca.crt get / --prefix --keys-only
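
A rough consistency check is to compare key counts per member; identical counts across the three endpoints suggest the data is in sync (the counts include blank separator lines, which is fine for comparison):

for ep in 192.168.168.194:2379 192.168.168.195:2379 192.168.168.196:2379; do
  echo -n "$ep: "
  ETCDCTL_API=3 etcdctl --endpoints=$ep --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key --cacert=/etc/kubernetes/pki/etcd/ca.crt get / --prefix --keys-only | wc -l
done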
