Proxy configuration
Proxy IP: 10.10.110.248
stream {
    upstream kube-apiserver {
        server 10.10.109.154:6443 max_fails=3 fail_timeout=30s;
        server 10.10.108.232:6443 max_fails=3 fail_timeout=30s;
        server 10.10.110.16:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
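This assumes an nginx built with the stream module (for example the nginx-mod-stream package on CentOS, or a build compiled --with-stream). The configuration can then be validated and activated with:
nginx -t                      # check configuration syntax
systemctl restart nginx
ss -lntp | grep 6443          # confirm nginx is listening on the proxy port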
hosts file
10.10.109.154 k8s-master-1
10.10.108.232 k8s-master-2
10.10.110.16 k8s-master-3
10.10.111.159 k8s-node-1
10.10.110.195 k8s-node-2
10.10.111.170 k8s-node-3
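If every node should carry the same entries, one hedged way to distribute them (assuming passwordless root SSH and that the local /etc/hosts is already complete):
for h in k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3; do
  scp /etc/hosts root@${h}:/etc/hosts    # overwrite each node's hosts file with the local copy
done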
Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2 wget
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum install docker-ce-19.03.12 -y
mkdir /etc/docker
vim /etc/docker/daemon.json
{
"storage-driver": "overlay2",
"registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"]
}
systemctl start docker
systemctl enable docker
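A quick check, not part of the original steps, to confirm the daemon picked up daemon.json:
docker info | grep -iA1 -E 'storage driver|registry mirrors'    # should show overlay2 and the Aliyun mirror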
Set kernel parameters
vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
sysctl -p /etc/sysctl.d/k8s.conf
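Two caveats the snippet above does not cover: the net.bridge.* keys only exist once the br_netfilter module is loaded, and kubeadm's preflight checks require swap to be disabled entirely (vm.swappiness=0 alone is not enough). A hedged sketch:
modprobe br_netfilter                   # expose the net.bridge.bridge-nf-call-* keys
sysctl -p /etc/sysctl.d/k8s.conf        # re-apply the settings after loading the module
swapoff -a                              # turn off swap immediately
sed -ri 's/.*swap.*/#&/' /etc/fstab     # comment out swap entries so it stays off after reboot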
Configure the IPVS modules
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
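On CentOS 7's 3.10 kernel, the conntrack module that kube-proxy's IPVS mode looks for is typically nf_conntrack_ipv4 (it was folded into nf_conntrack only on 4.19+ kernels); if the grep above shows nothing for conntrack, load it explicitly:
modprobe -- nf_conntrack_ipv4
lsmod | grep nf_conntrack_ipv4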
Install kubeadm, kubelet, and kubectl
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
enabled=1
EOF
yum install kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3 -y
systemctl enable kubelet
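kubelet is only enabled here, not started; once it runs, it will restart every few seconds until kubeadm init/join writes its configuration, which is expected. Its state can be checked with:
systemctl status kubelet
journalctl -u kubelet -f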
Before initializing the cluster, rebuild kubeadm so that the certificates it generates are automatically valid for 100 years
wget https://github.com/kubernetes/kubernetes/archive/v1.18.3.tar.gz
tar xf v1.18.3.tar.gz
mv kubernetes-1.18.3 kubernetes
cd kubernetes
Change the CA validity period to 100 years (default: 10 years)
vim ./staging/src/k8s.io/client-go/util/cert/cert.go
...
//NotAfter: now.Add(duration365d * 10).UTC(),
NotAfter: now.Add(duration365d * 100).UTC(),
...
Change the certificate validity period to 100 years (default: 1 year)
vim ./cmd/kubeadm/app/constants/constants.go
...
CertificateValidity = time.Hour * 24 * 365 * 100
...
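The same two edits can also be applied non-interactively; a sketch that assumes the files still contain the stock v1.18.3 values shown above and is run only once:
sed -i 's/duration365d \* 10/duration365d * 100/' ./staging/src/k8s.io/client-go/util/cert/cert.go
sed -i 's/CertificateValidity = time.Hour \* 24 \* 365/CertificateValidity = time.Hour * 24 * 365 * 100/' ./cmd/kubeadm/app/constants/constants.go
grep -n 'NotAfter' ./staging/src/k8s.io/client-go/util/cert/cert.go            # verify the CA change
grep -n 'CertificateValidity =' ./cmd/kubeadm/app/constants/constants.go       # verify the leaf-certificate change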
Build kubeadm inside the kube-cross builder image (the image tag pulled below should match the version recorded in build/build-image/cross/VERSION)
cat ./build/build-image/cross/VERSION
v1.13.4-1
docker pull registry.aliyuncs.com/google_containers/kube-cross:v1.13.6-1
docker run --rm -v /root/kubernetes:/go/src/k8s.io/kubernetes -it registry.aliyuncs.com/google_containers/kube-cross:v1.13.6-1 bash
cd /go/src/k8s.io/kubernetes
make all WHAT=cmd/kubeadm GOFLAGS=-v
exit
On the build node, back up the original kubeadm and install the rebuilt binary:
mv /usr/bin/kubeadm /usr/bin/kubeadm_backup
cp _output/local/bin/linux/amd64/kubeadm /usr/bin/kubeadm
chmod +x /usr/bin/kubeadm
On each remaining node, back up its kubeadm first, then copy the rebuilt binary over from the build node (replace the addresses below with the other nodes of this cluster):
mv /usr/bin/kubeadm /usr/bin/kubeadm_backup
scp /usr/bin/kubeadm 192.168.66.81:/usr/bin/
scp /usr/bin/kubeadm 192.168.66.178:/usr/bin/
kubeadm version
Initialize the first master node
kubeadm init --control-plane-endpoint "10.10.110.248:6443" \
  --upload-certs \
  --image-repository "registry.aliyuncs.com/google_containers" \
  --kubernetes-version "v1.18.3" \
  --pod-network-cidr "192.168.0.0/16"
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.10.110.248:6443 --token 3wzjpk.9jw2n8g0yasn0foe \
--discovery-token-ca-cert-hash sha256:685d49d24273230e021ae8a30dff6a4e239e66c4ac75dcfb37afd716f848fa56 \
--control-plane --certificate-key 174dbcabedef02f4c53c979f99b9bf3a257acf8d5bf00a8741509aa86f7749c6
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.110.248:6443 --token 3wzjpk.9jw2n8g0yasn0foe \
--discovery-token-ca-cert-hash sha256:685d49d24273230e021ae8a30dff6a4e239e66c4ac75dcfb37afd716f848fa56
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
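With kubectl configured, confirm the node has registered and that the rebuilt kubeadm really issued long-lived certificates (both commands exist in v1.18):
kubectl get nodes
kubeadm alpha certs check-expiration
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -enddate    # should be roughly 100 years out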
Switch kube-proxy to IPVS mode
kubectl edit cm kube-proxy -n kube-system
In the editor, change mode: "" to mode: "ipvs", then delete the existing kube-proxy pods so they restart with the new mode:
kubectl delete pod -n kube-system kube-proxy-7ctr2
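The pod name above is specific to one cluster; a more general sketch is to restart every kube-proxy pod by its label (k8s-app=kube-proxy is the label used by the kubeadm-deployed DaemonSet) and then confirm IPVS took effect:
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs    # expect a line about the ipvs proxier
ipvsadm -Ln                                                         # the service/endpoint rules should appear here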
Install the network plugin
wget https://docs.projectcalico.org/v3.18/manifests/canal.yaml
kubectl apply -f canal.yaml
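Note that the canal manifest's flannel backend defaults its Network to 10.244.0.0/16, while the cluster above was initialized with --pod-network-cidr 192.168.0.0/16; if you stay with canal, align the two, for example by editing the manifest before applying it (a hedged sketch):
sed -i 's#10.244.0.0/16#192.168.0.0/16#' canal.yaml     # make flannel's net-conf match the cluster pod CIDR
kubectl apply -f canal.yaml
kubectl get pods -n kube-system -w                      # wait for the canal and coredns pods to become Running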
Join the remaining master nodes to the cluster
kubeadm join 10.10.110.248:6443 --token 3wzjpk.9jw2n8g0yasn0foe \
--discovery-token-ca-cert-hash sha256:685d49d24273230e021ae8a30dff6a4e239e66c4ac75dcfb37afd716f848fa56 \
--control-plane --certificate-key 174dbcabedef02f4c53c979f99b9bf3a257acf8d5bf00a8741509aa86f7749c6
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join the worker nodes to the cluster
kubeadm join 10.10.110.248:6443 --token 3wzjpk.9jw2n8g0yasn0foe \
--discovery-token-ca-cert-hash sha256:685d49d24273230e021ae8a30dff6a4e239e66c4ac75dcfb37afd716f848fa56
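Once every node has joined, verify the whole cluster from any control-plane node:
kubectl get nodes -o wide                  # all six nodes should eventually be Ready
kubectl get pods -n kube-system -o wide    # control-plane, kube-proxy and canal pods on every node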