#!/usr/bin/env bash
set -e
# Declare the cluster membership
declare -A MasterArray otherMaster NodeArray
MasterArray=(['k8s-m1']=172.20.23.167 ['k8s-m2']=172.20.23.168 ['k8s-m3']=172.20.23.169)
otherMaster=(['k8s-m2']=172.20.23.168 ['k8s-m3']=172.20.23.169)
NodeArray=(['k8s-n1']=172.20.23.173)
export VIP=172.20.23.4
export INGRESS_VIP=172.20.23.11
[ "${#MasterArray[@]}" -eq 1 ] && export VIP=${MasterArray[@]} || export API_PORT=8443
export KUBE_APISERVER=https://${VIP}:${API_PORT:-6443}
# Declare the Kubernetes version to install
export KUBE_VERSION=v1.12.3
# Network interface name
export interface=ens160
export K8S_DIR=/etc/kubernetes
export PKI_DIR=${K8S_DIR}/pki
export ETCD_SSL=/etc/etcd/ssl
export MANIFESTS_DIR=/etc/kubernetes/manifests/
# cni
export CNI_URL="https://github.com/containernetworking/plugins/releases/download"
export CNI_VERSION=v0.7.4
# cfssl
export CFSSL_URL="https://pkg.cfssl.org/R1.2"
# etcd
export ETCD_version=v3.3.10
# First, on k8s-m1, fetch the binaries, config files and yml manifests needed for the deployment (via git)
# curl https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz > kubernetes-server-linux-amd64.tar.gz
# tar -zxvf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
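# Optional sanity check, not part of the original steps: confirm the binaries landed in
# /usr/local/bin and report the expected release before distributing them.
# kubectl version --client --short
# kubelet --version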
#Distribute the master components to the other masters
#(if you do not want the masters to run pods, skip copying kubelet and kube-proxy here, and skip the later kubelet-related steps on the master nodes)
# for NODE in "${!otherMaster[@]}"; do
# echo "--- $NODE ${otherMaster[$NODE]} ---"
# scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} ${otherMaster[$NODE]}:/usr/local/bin/
# done
#Distribute the Kubernetes binaries to the nodes
# for NODE in "${!NodeArray[@]}"; do
# echo "--- $NODE ${NodeArray[$NODE]} ---"
# scp /usr/local/bin/kube{let,-proxy} ${NodeArray[$NODE]}:/usr/local/bin/
# done
#Download the Kubernetes CNI binaries on k8s-m1 and distribute them
#Distribute the CNI files to otherMaster
# mkdir -p /opt/cni/bin
# wget "${CNI_URL}/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz"
# tar -zxf cni-plugins-amd64-${CNI_VERSION}.tgz -C /opt/cni/bin
# Distribute the CNI files
# for NODE in "${!otherMaster[@]}"; do
# echo "--- $NODE ${otherMaster[$NODE]} ---"
# ssh ${otherMaster[$NODE]} 'mkdir -p /opt/cni/bin'
# scp /opt/cni/bin/* ${otherMaster[$NODE]}:/opt/cni/bin/
# done
# # Distribute the CNI files to the nodes
# for NODE in "${!NodeArray[@]}"; do
# echo "--- $NODE ${NodeArray[$NODE]} ---"
# ssh ${NodeArray[$NODE]} 'mkdir -p /opt/cni/bin'
# scp /opt/cni/bin/* ${NodeArray[$NODE]}:/opt/cni/bin/
# done
#Install the CFSSL tools on k8s-m1; they will be used to create the TLS certificates.
# wget "${CFSSL_URL}/cfssl_linux-amd64" -O /usr/local/bin/cfssl
# wget "${CFSSL_URL}/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
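# Optional check, not part of the original steps: cfssl should print a version banner once installed.
# cfssl version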
#Create the cluster CA keys and certificates
#Etcd CA
#First create the /etc/etcd/ssl directory on k8s-m1
cd ~/k8s-manual-files/pki
mkdir -p ${ETCD_SSL}
#Generate the etcd CA key and certificate from the CSR json files ca-config.json and etcd-ca-csr.json:
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare ${ETCD_SSL}/etcd-ca
#Generate the etcd server certificate
cfssl gencert \
-ca=${ETCD_SSL}/etcd-ca.pem \
-ca-key=${ETCD_SSL}/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,$(xargs -n1<<<${MasterArray[@]} | sort | paste -d, -s -) \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare ${ETCD_SSL}/etcd
#The -hostname value contains the IPs of all master nodes; if you plan to add masters later, reserve extra IPs in the certificate here
#When done, remove the unneeded files and confirm /etc/etcd/ssl contains the following files
rm -rf ${ETCD_SSL}/*.csr
ls $ETCD_SSL
#etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem
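# Optional check: inspect the SANs baked into the etcd server certificate; 127.0.0.1 and every
# IP in MasterArray should be listed.
openssl x509 -in ${ETCD_SSL}/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'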
#On k8s-m1, copy the relevant files to the other etcd nodes. etcd runs on all master nodes here, so copy the etcd certificates to the other master nodes:
for NODE in "${!otherMaster[@]}"; do
echo "--- $NODE ${otherMaster[$NODE]} ---"
ssh ${otherMaster[$NODE]} "mkdir -p ${ETCD_SSL}"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
scp ${ETCD_SSL}/${FILE} ${otherMaster[$NODE]}:${ETCD_SSL}/${FILE}
done
done
#etcd binaries
#All etcd releases can be found at the URL below
#https://github.com/etcd-io/etcd/releases
#Download the etcd binaries on k8s-m1; for a single-node setup v3.1.9 is recommended because of a bug
#[ "${#MasterArray[@]}" -eq 1 ] && ETCD_version=v3.3.10 || :
cd ~/k8s-manual-files
#On k8s-m1, distribute the etcd binaries to the other masters
for NODE in "${!otherMaster[@]}"; do
echo "--- $NODE ${otherMaster[$NODE]} ---"
scp /usr/local/bin/etcd* ${otherMaster[$NODE]}:/usr/local/bin/
done
#On k8s-m1, prepare the etcd configuration file and distribute the related files
#The configuration file lives in /etc/etcd/etcd.config.yml
#Inject the base variables
cd ~/k8s-manual-files/master/
etcd_servers=$( xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#https://#;s#$#:2379#;$s#\n##' | paste -d, -s - )
etcd_initial_cluster=$( for i in ${!MasterArray[@]};do echo $i=https://${MasterArray[$i]}:2380; done | sort | paste -d, -s - )
sed -ri "/initial-cluster:/s#'.+'#'${etcd_initial_cluster}'#" etc/etcd/config.yml
#Distribute the systemd unit and the configuration file
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} "mkdir -p $MANIFESTS_DIR /etc/etcd /var/lib/etcd"
scp systemd/etcd.service ${MasterArray[$NODE]}:/usr/lib/systemd/system/etcd.service
scp etc/etcd/config.yml ${MasterArray[$NODE]}:/etc/etcd/etcd.config.yml
ssh ${MasterArray[$NODE]} "sed -i "s/{HOSTNAME}/$NODE/g" /etc/etcd/etcd.config.yml"
ssh ${MasterArray[$NODE]} "sed -i "s/{PUBLIC_IP}/${MasterArray[$NODE]}/g" /etc/etcd/etcd.config.yml"
ssh ${MasterArray[$NODE]} 'systemctl daemon-reload'
done
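# Optional check: make sure no {HOSTNAME}/{PUBLIC_IP} placeholders were left behind on any master.
for NODE in "${!MasterArray[@]}"; do
ssh ${MasterArray[$NODE]} "grep -E '\{HOSTNAME\}|\{PUBLIC_IP\}' /etc/etcd/etcd.config.yml" && echo "placeholders still present on $NODE" || true
done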
#Start etcd on all masters from k8s-m1
#On first start, the etcd process waits for the other members to join the cluster, so systemctl start etcd will hang for a while; this is normal
#Once all of them are started, use the etcdctl commands below to confirm the cluster is healthy
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} 'systemctl enable --now etcd' &
done
wait
#When output comes back to the terminal, press Enter a few times until the prompt returns
#Run the following commands on k8s-m1 to verify the etcd cluster status; the second one uses the v3 API to query the cluster's keys
etcdctl \
--cert-file /etc/etcd/ssl/etcd.pem \
--key-file /etc/etcd/ssl/etcd-key.pem \
--ca-file /etc/etcd/ssl/etcd-ca.pem \
--endpoints $etcd_servers cluster-health
#Query the cluster's keys with the v3 API
ETCDCTL_API=3 \
etcdctl \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--cacert /etc/etcd/ssl/etcd-ca.pem \
--endpoints $etcd_servers get / --prefix --keys-only
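# Optional extra check: the v3 API can also report per-endpoint health; every member should answer "is healthy".
ETCDCTL_API=3 \
etcdctl \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--cacert=/etc/etcd/ssl/etcd-ca.pem \
--endpoints $etcd_servers endpoint health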
#Kubernetes CA
#To keep the cluster secure, the Kubernetes components use x509 certificates to encrypt and authenticate their communication.
#Create the pki directory on k8s-m1 and generate the root CA used to sign the other Kubernetes certificates
mkdir -p ${PKI_DIR}
cd ~/k8s-manual-files/pki
cfssl gencert -initca ca-csr.json | cfssljson -bare ${PKI_DIR}/ca
ls ${PKI_DIR}/ca*.pem
#/etc/kubernetes/pki/ca-key.pem /etc/kubernetes/pki/ca.pem
#API Server Certificate
#This certificate is used for API Server and kubelet client communication; generate the kube-apiserver certificate with the following command
cfssl gencert \
-ca=${PKI_DIR}/ca.pem \
-ca-key=${PKI_DIR}/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,${VIP},127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,$(xargs -n1<<<${MasterArray[@]} | sort | paste -d, -s -) \
-profile=kubernetes \
apiserver-csr.json | cfssljson -bare ${PKI_DIR}/apiserver
ls ${PKI_DIR}/apiserver*.pem
#/etc/kubernetes/pki/apiserver-key.pem /etc/kubernetes/pki/apiserver.pem
#In -hostname, 10.96.0.1 is the Kubernetes endpoint's Cluster IP (by default the first IP of the service range, used by pods in the cluster to reach the Kubernetes API server);
#kubernetes.default is the Kubernetes DNS name.
#If you access the API server by a domain name, add that domain here as well
#If you plan to add masters later, reserve extra IPs in the certificate here
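# Optional check: list the SANs in the apiserver certificate; 10.96.0.1, the VIP and every master IP
# should appear here.
openssl x509 -in ${PKI_DIR}/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'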
#Front Proxy Certificate
#This certificate is used by the Authenticating Proxy feature, which mainly provides authentication for API Aggregation. Generate the CA with the following command:
cfssl gencert \
-initca front-proxy-ca-csr.json | cfssljson -bare ${PKI_DIR}/front-proxy-ca
ls ${PKI_DIR}/front-proxy-ca*.pem
#/etc/kubernetes/pki/front-proxy-ca-key.pem /etc/kubernetes/pki/front-proxy-ca.pem
#Then generate the front-proxy-client certificate (the hosts warning can be ignored):
cfssl gencert \
-ca=${PKI_DIR}/front-proxy-ca.pem \
-ca-key=${PKI_DIR}/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
front-proxy-client-csr.json | cfssljson -bare ${PKI_DIR}/front-proxy-client
ls ${PKI_DIR}/front-proxy-client*.pem
#front-proxy-client-key.pem front-proxy-client.pem
#Controller Manager Certificate
#This certificate creates the system:kube-controller-manager user (certificate CN), which is bound to the
#system:kube-controller-manager RBAC Cluster Role so that the Controller Manager
#component can access the API objects it needs.
#Generate the Controller Manager certificate with the following command (the hosts warning can be ignored)
cfssl gencert \
-ca=${PKI_DIR}/ca.pem \
-ca-key=${PKI_DIR}/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare ${PKI_DIR}/controller-manager
ls ${PKI_DIR}/controller-manager*.pem
#controller-manager-key.pem controller-manager.pem
#Then use kubectl to generate the Controller Manager kubeconfig file
# controller-manager set cluster
kubectl config set-cluster kubernetes \
--certificate-authority=${PKI_DIR}/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
# controller-manager set credentials
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=${PKI_DIR}/controller-manager.pem \
--client-key=${PKI_DIR}/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
# controller-manager set context
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
# controller-manager set default context
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
#Scheduler Certificate
#This certificate creates the system:kube-scheduler user (certificate CN), which is bound to the system:kube-scheduler
#RBAC Cluster Role so that the Scheduler component can access the API objects it needs.
#Generate the Scheduler certificate with the following command (the hosts warning can be ignored):
cfssl gencert \
-ca=${PKI_DIR}/ca.pem \
-ca-key=${PKI_DIR}/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare ${PKI_DIR}/scheduler
ls ${PKI_DIR}/scheduler*.pem
#/etc/kubernetes/pki/scheduler-key.pem /etc/kubernetes/pki/scheduler.pem
#Then use kubectl to generate the Scheduler kubeconfig file
# scheduler set cluster
kubectl config set-cluster kubernetes \
--certificate-authority=${PKI_DIR}/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${K8S_DIR}/scheduler.kubeconfig
# scheduler set credentials
kubectl config set-credentials system:kube-scheduler \
--client-certificate=${PKI_DIR}/scheduler.pem \
--client-key=${PKI_DIR}/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${K8S_DIR}/scheduler.kubeconfig
# scheduler set context
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=${K8S_DIR}/scheduler.kubeconfig
# scheduler use default context
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=${K8S_DIR}/scheduler.kubeconfig
#Admin Certificate
#Admin is bound to the cluster-admin RBAC Cluster Role; whenever you want to operate on
#all Kubernetes cluster features (most commonly via kubectl), you must use the kubeconfig file generated here.
cfssl gencert \
-ca=${PKI_DIR}/ca.pem \
-ca-key=${PKI_DIR}/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare ${PKI_DIR}/admin
ls ${PKI_DIR}/admin*.pem
#/etc/kubernetes/pki/admin-key.pem /etc/kubernetes/pki/admin.pem
#By default kubectl reads the kube-apiserver address, certificates, user name and so on from ~/.kube/config;
#without it, kubectl commands may fail (they default to the anonymous port 8080)
#Then use kubectl to generate the Admin kubeconfig file
# admin set cluster
kubectl config set-cluster kubernetes \
--certificate-authority=${PKI_DIR}/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${K8S_DIR}/admin.kubeconfig
# admin set credentials
kubectl config set-credentials kubernetes-admin \
--client-certificate=${PKI_DIR}/admin.pem \
--client-key=${PKI_DIR}/admin-key.pem \
--embed-certs=true \
--kubeconfig=${K8S_DIR}/admin.kubeconfig
# admin set context
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=${K8S_DIR}/admin.kubeconfig
# admin set default context
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=${K8S_DIR}/admin.kubeconfig
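# Optional check: the admin kubeconfig should point at ${KUBE_APISERVER} and default to the
# kubernetes-admin@kubernetes context.
kubectl config view --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config current-context --kubeconfig=${K8S_DIR}/admin.kubeconfig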
# Master Kubelet Certificate
# The Node authorizer is used here so that the nodes' kubelets can access APIs such as services and endpoints.
# Using the Node authorizer requires the system:nodes Cluster Role (certificate Organization)
# and a user name of the form system:node:<nodeName> (certificate Common Name).
# First, on k8s-m1, generate the kubelet certificates for all master nodes with the following commands:
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ---"
\cp kubelet-csr.json kubelet-$NODE-csr.json;
sed -i "s/\$NODE/$NODE/g" kubelet-$NODE-csr.json;
cfssl gencert \
-ca=${PKI_DIR}/ca.pem \
-ca-key=${PKI_DIR}/ca-key.pem \
-config=ca-config.json \
-hostname=$NODE \
-profile=kubernetes \
kubelet-$NODE-csr.json | cfssljson -bare ${PKI_DIR}/kubelet-$NODE;
rm -f kubelet-$NODE-csr.json
done
ls ${PKI_DIR}/kubelet*.pem
#/etc/kubernetes/pki/kubelet-k8s-m1-key.pem /etc/kubernetes/pki/kubelet-k8s-m2.pem
#/etc/kubernetes/pki/kubelet-k8s-m1.pem /etc/kubernetes/pki/kubelet-k8s-m3-key.pem
#/etc/kubernetes/pki/kubelet-k8s-m2-key.pem /etc/kubernetes/pki/kubelet-k8s-m3.pem
#The -hostname and $NODE values are adjusted per node.
#When done, copy the kubelet certificates to all master nodes:
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} "mkdir -p ${PKI_DIR}"
scp ${PKI_DIR}/ca.pem ${MasterArray[$NODE]}:${PKI_DIR}/ca.pem
scp ${PKI_DIR}/kubelet-$NODE-key.pem ${MasterArray[$NODE]}:${PKI_DIR}/kubelet-key.pem
scp ${PKI_DIR}/kubelet-$NODE.pem ${MasterArray[$NODE]}:${PKI_DIR}/kubelet.pem
rm -f ${PKI_DIR}/kubelet-$NODE-key.pem ${PKI_DIR}/kubelet-$NODE.pem
done
#Then, from k8s-m1, run the following to generate the kubelet kubeconfig file on every master
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ---"
ssh ${MasterArray[$NODE]} "cd ${PKI_DIR} && \
kubectl config set-cluster kubernetes \
--certificate-authority=${PKI_DIR}/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${K8S_DIR}/kubelet.kubeconfig && \
kubectl config set-credentials system:node:${NODE} \
--client-certificate=${PKI_DIR}/kubelet.pem \
--client-key=${PKI_DIR}/kubelet-key.pem \
--embed-certs=true \
--kubeconfig=${K8S_DIR}/kubelet.kubeconfig && \
kubectl config set-context system:node:${NODE}@kubernetes \
--cluster=kubernetes \
--user=system:node:${NODE} \
--kubeconfig=${K8S_DIR}/kubelet.kubeconfig && \
kubectl config use-context system:node:${NODE}@kubernetes \
--kubeconfig=${K8S_DIR}/kubelet.kubeconfig"
done
# Service Account Key
# The Kubernetes Controller Manager uses a key pair to generate and sign Service Account tokens.
# This cannot go through the CA; instead, create a public/private key pair for the API Server and the Controller Manager to use:
# Run the following on k8s-m1
openssl genrsa -out ${PKI_DIR}/sa.key 2048
openssl rsa -in ${PKI_DIR}/sa.key -pubout -out ${PKI_DIR}/sa.pub
ls ${PKI_DIR}/sa.*
#/etc/kubernetes/pki/sa.key /etc/kubernetes/pki/sa.pub
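# Optional check: the two files must be a matching RSA key pair; the two digests below should be identical.
openssl rsa -in ${PKI_DIR}/sa.key -noout -modulus | md5sum
openssl rsa -pubin -in ${PKI_DIR}/sa.pub -noout -modulus | md5sum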
# Remove unneeded files
# Once everything is prepared, the files that are no longer needed can be deleted
rm -f ${PKI_DIR}/*.csr \
${PKI_DIR}/scheduler*.pem \
${PKI_DIR}/controller-manager*.pem \
${PKI_DIR}/admin*.pem \
${PKI_DIR}/kubelet*.pem
# Copy the files to the other nodes
# Copy the certificate files to the other master nodes
for NODE in "${!otherMaster[@]}"; do
echo "--- $NODE ${otherMaster[$NODE]}---"
for FILE in $(ls ${PKI_DIR}); do
scp ${PKI_DIR}/${FILE} ${otherMaster[$NODE]}:${PKI_DIR}/${FILE}
done
done
# Copy the Kubernetes config files to the other master nodes:
for NODE in "${!otherMaster[@]}"; do
echo "--- $NODE ${otherMaster[$NODE]}---"
for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
scp ${K8S_DIR}/${FILE} ${otherMaster[$NODE]}:${K8S_DIR}/${FILE}
done
done
# Kubernetes Masters
# HA (haproxy + keepalived); do not bother with HA if there is only one master
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} 'yum install haproxy keepalived -y' &
done
wait
# On k8s-m1, fill in the relevant configuration files before distributing them
cd ~/k8s-manual-files/master/etc
# Edit the haproxy.cfg configuration file
sed -i '$r '<(paste <( seq -f' server k8s-api-%g' ${#MasterArray[@]} ) <( xargs -n1<<<${MasterArray[@]} | sort | sed 's#$#:6443 check#')) haproxy/haproxy.cfg
# Edit keepalived (inject the interface name and the VIP with the commands below)
sed -ri "s#\{\{ VIP \}\}#${VIP}#" keepalived/*
sed -ri "s#\{\{ interface \}\}#${interface}#" keepalived/keepalived.conf
sed -i '/unicast_peer/r '<(xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#\t#') keepalived/keepalived.conf
# Distribute the files
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
scp -r haproxy/ ${MasterArray[$NODE]}:/etc
scp -r keepalived/ ${MasterArray[$NODE]}:/etc
ssh ${MasterArray[$NODE]} 'systemctl enable --now haproxy keepalived'
done
# Ping the VIP to see whether it is reachable; wait roughly four or five seconds first for keepalived and haproxy to come up
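# Not in the original steps: give keepalived and haproxy a few seconds to converge before probing the VIP.
sleep 5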
ping -c1 $VIP || true
# If the VIP did not come up, keepalived is not running properly; restart keepalived on each node, or check whether
# the interface name and IP were injected correctly into /etc/keepalived/keepalived.conf
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} 'systemctl restart haproxy keepalived'
done
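# Optional check: haproxy should be listening on the API port on every master
# (8443 here for multi-master, 6443 otherwise).
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} "ss -tlnp | grep ':${API_PORT:-6443}'" || true
done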
# Master components
# On k8s-m1, fill in the relevant configuration files before distributing them
cd ~/k8s-manual-files/master/
etcd_servers=$( xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#https://#;s#$#:2379#;$s#\n##' | paste -d, -s - )
# Inject the VIP and etcd_servers
sed -ri '/--advertise-address/s#=.+#='"$VIP"' \\#' systemd/kube-apiserver.service
sed -ri '/--etcd-servers/s#=.+#='"$etcd_servers"' \\#' systemd/kube-apiserver.service
# Edit encryption.yml
ENCRYPT_SECRET=$( head -c 32 /dev/urandom | base64 )
sed -ri "/secret:/s#(: ).+#\1${ENCRYPT_SECRET}#" encryption/config.yml
# Distribute the files (if you do not want the masters to run pods, skip copying the kubelet configuration files)
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} "mkdir -p $MANIFESTS_DIR /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes"
scp systemd/kube-*.service ${MasterArray[$NODE]}:/usr/lib/systemd/system/
scp encryption/config.yml ${MasterArray[$NODE]}:/etc/kubernetes/encryption.yml
scp audit/policy.yml ${MasterArray[$NODE]}:/etc/kubernetes/audit-policy.yml
scp systemd/kubelet.service ${MasterArray[$NODE]}:/lib/systemd/system/kubelet.service
scp systemd/10-kubelet.conf ${MasterArray[$NODE]}:/etc/systemd/system/kubelet.service.d/10-kubelet.conf
scp etc/kubelet/kubelet-conf.yml ${MasterArray[$NODE]}:/etc/kubernetes/kubelet-conf.yml
done
#From k8s-m1, start the kubelet service on all masters and set up kubectl bash completion:
for NODE in "${!MasterArray[@]}"; do
echo "--- $NODE ${MasterArray[$NODE]} ---"
ssh ${MasterArray[$NODE]} 'systemctl enable --now kubelet kube-apiserver kube-controller-manager kube-scheduler;
mkdir -p ~/.kube;
\cp /etc/kubernetes/admin.kubeconfig ~/.kube/config;
kubectl completion bash > /etc/bash_completion.d/kubectl'
done
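# Optional check (run on k8s-m1): wait for the apiserver to answer, then look at control-plane health.
# The masters will stay NotReady until the CNI plugin is deployed further down.
until kubectl get node &>/dev/null; do sleep 3; done
kubectl get cs
kubectl get node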
# Create the TLS Bootstrapping RBAC and Secret
# The kubectl commands from here on only need to be run on any one master
# First, on k8s-m1, create variables to generate BOOTSTRAP_TOKEN, and build the bootstrap-kubelet kubeconfig file
export TOKEN_ID=$(openssl rand -hex 3)
export TOKEN_SECRET=$(openssl rand -hex 8)
export BOOTSTRAP_TOKEN=${TOKEN_ID}.${TOKEN_SECRET}
# bootstrap set cluster
kubectl config set-cluster kubernetes \
--certificate-authority=${PKI_DIR}/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
# bootstrap set credentials
kubectl config set-credentials tls-bootstrap-token-user \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
# bootstrap set context
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
# bootstrap use default context
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
#Then, on k8s-m1, create the TLS bootstrap secret used for automatic certificate signing:
cd ~/k8s-manual-files/master
# Inject the variables
sed -ri "s#\{TOKEN_ID\}#${TOKEN_ID}#g" resources/bootstrap-token-Secret.yml
sed -ri "/token-id/s#\S+\$#'&'#" resources/bootstrap-token-Secret.yml
sed -ri "s#\{TOKEN_SECRET\}#${TOKEN_SECRET}#g" resources/bootstrap-token-Secret.yml
kubectl create -f resources/bootstrap-token-Secret.yml
# Expected output:
#secret "bootstrap-token-65a3a9" created
#On k8s-m1, create the TLS Bootstrap auto-approve RBAC to handle CSRs automatically
kubectl apply -f resources/kubelet-bootstrap-rbac.yml
# Expected output:
#clusterrolebinding.rbac.authorization.k8s.io "kubelet-bootstrap" created
#clusterrolebinding.rbac.authorization.k8s.io "node-autoapprove-bootstrap" created
#clusterrolebinding.rbac.authorization.k8s.io "node-autoapprove-certificate-rotation" created
#To make the cluster easier to manage we want kubectl logs to work, but because of API permissions an RBAC Role must be created to grant access; run the following on k8s-m1 to create it
kubectl apply -f resources/apiserver-to-kubelet-rbac.yml
# Expected output:
#clusterrole.rbac.authorization.k8s.io "system:kube-apiserver-to-kubelet" configured
#clusterrolebinding.rbac.authorization.k8s.io "system:kube-apiserver" configured
#Add a taint to the master nodes so that pods (without a toleration for that taint) will not run on the masters:
kubectl taint nodes node-role.kubernetes.io/master="":NoSchedule --all
# Expected output:
# node "k8s-m1" tainted
# node "k8s-m2" tainted
# node "k8s-m3" tainted
# Kubernetes Nodes
# This part explains how to create and configure the Kubernetes Node role; nodes are the worker machines that actually run the container instances (pods).
# Before starting the deployment, copy the required files from k8s-m1 to all node machines
cd ${PKI_DIR}
for NODE in "${!NodeArray[@]}"; do
echo "--- $NODE ${NodeArray[$NODE]} ---"
ssh ${NodeArray[$NODE]} "mkdir -p ${PKI_DIR} ${ETCD_SSL}"
# Etcd
for FILE in etcd-ca.pem etcd.pem etcd-key.pem; do
scp ${ETCD_SSL}/${FILE} ${NodeArray[$NODE]}:${ETCD_SSL}/${FILE}
done
# Kubernetes
for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig ; do
scp ${K8S_DIR}/${FILE} ${NodeArray[$NODE]}:${K8S_DIR}/${FILE}
done
done
# Deployment and configuration
# From k8s-m1, distribute the kubelet.service files to each node to manage kubelet:
cd ~/k8s-manual-files/
for NODE in "${!NodeArray[@]}"; do
echo "--- $NODE ${NodeArray[$NODE]} ---"
ssh ${NodeArray[$NODE]} "mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d $MANIFESTS_DIR"
scp node/systemd/kubelet.service ${NodeArray[$NODE]}:/lib/systemd/system/kubelet.service
scp node/systemd/10-kubelet.conf ${NodeArray[$NODE]}:/etc/systemd/system/kubelet.service.d/10-kubelet.conf
scp node/etc/kubelet/kubelet-conf.yml ${NodeArray[$NODE]}:/etc/kubernetes/kubelet-conf.yml
done
# Finally, from k8s-m1, start the kubelet service on each node:
for NODE in "${!NodeArray[@]}"; do
echo "--- $NODE ${NodeArray[$NODE]} ---"
ssh ${NodeArray[$NODE]} 'systemctl enable --now kubelet.service'
done
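# Optional check (run on a master): the node CSRs should be auto-approved by the RBAC created above,
# and the new nodes should register (NotReady until the CNI plugin is deployed).
kubectl get csr
kubectl get node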
# Kubernetes Core Addons deployment
# kube-proxy is deployed as a DaemonSet
cd ~/k8s-manual-files
# Inject the variables
sed -ri "/server:/s#(: ).+#\1${KUBE_APISERVER}#" addons/kube-proxy/kube-proxy.yml
sed -ri "/image:.+kube-proxy/s#:[^:]+\$#:$KUBE_VERSION#" addons/kube-proxy/kube-proxy.yml
kubectl apply -f addons/kube-proxy/kube-proxy.yml
# Expected output:
# serviceaccount "kube-proxy" created
# clusterrolebinding.rbac.authorization.k8s.io "system:kube-proxy" created
# configmap "kube-proxy" created
# daemonset.apps "kube-proxy" created
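# Optional check: the kube-proxy DaemonSet should schedule a pod on every node.
kubectl -n kube-system get ds,pod -o wide | grep kube-proxy || true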
# Calico
# Pull the images
curl -s https://zhangguanzhang.github.io/bash/pull.sh | bash -s -- quay.io/calico/node:v3.1.3
curl -s https://zhangguanzhang.github.io/bash/pull.sh | bash -s -- quay.io/calico/cni:v3.1.3
sed -ri "s#\{\{ interface \}\}#${interface}#" addons/calico/v3.1/calico.yml
kubectl apply -f addons/calico/v3.1
kubectl -n kube-system get pod
# CoreDNS
# The nodes are referenced by hostname here, so it is recommended to write the host mappings into the CoreDNS configuration
sed -i '57r '<(echo ' hosts {';for NODE in "${!MasterArray[@]}";do echo " ${MasterArray[$NODE]} $NODE"; done;for NODE in "${!NodeArray[@]}";do echo " ${NodeArray[$NODE]} $NODE";done;echo ' }';) addons/coredns/coredns.yml
# If you add similar records later, edit the ConfigMap (it is YAML, so use spaces, never tabs) and send a signal to make CoreDNS reload;
# the main process runs in the foreground as PID 1, so find the corresponding pod and exec into it, or fake an update of the Deployment to trigger a restart
# kubectl exec coredns-xxxxxx -- kill -SIGUSR1 1
# On k8s-m1, create it with kubectl and check that it deployed successfully:
# Note: CoreDNS currently has an upstream bug, so it is not used here
# kubectl apply -f addons/coredns/coredns.yml
# kubectl -n kube-system get po -l k8s-app=kube-dns
# Whether CoreDNS works correctly is somewhat hit-or-miss; test it by creating the pod below
# First create a dnstools pod
cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
# Run nslookup to see whether it returns an address
kubectl exec -ti busybox -- nslookup kubernetes
# If CoreDNS was installed but does not work properly, delete it first and make sure its pod and svc are gone
# kubectl delete -f addons/coredns/coredns.yml
# kubectl -n kube-system get pod,svc -l k8s-app=kube-dns
# KubeDNS (use this if you hit the CoreDNS bug mentioned above)
kubectl apply -f addons/Kubedns/kubedns.yml
#Check the pod status
kubectl -n kube-system get pod,svc -l k8s-app=kube-dns
#Check that cluster DNS works
kubectl exec -ti busybox -- nslookup kubernetes
# If you are waiting for the upstream fix, you can later create the CoreDNS deploy first (the svc will load-balance to CoreDNS) and then delete the KubeDNS replication controller and pods
# Metrics Server (Kubernetes 1.8+)
kubectl create -f addons/metric-server/metrics-server-1.12+.yml
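# Optional check: once the metrics-server pod is running (give it a minute or two),
# the metrics API should start serving node metrics.
# kubectl -n kube-system get pod | grep metrics-server
# kubectl top node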
# Kubernetes Extra Addons deployment
cd ~/k8s-manual-files
# Dashboard
# On k8s-m1, create the Kubernetes dashboard with kubectl:
kubectl apply -f ExtraAddons/dashboard
kubectl -n kube-system get po,svc -l k8s-app=kubernetes-dashboard
# When finished, the Dashboard can be reached in a browser at https://{YOUR_VIP}:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
# Get a token with the command below, copy it, and paste it into the Kubernetes dashboard. Note that normally you would grant specific access rights per user.
kubectl -n kube-system describe secrets | sed -rn '/\sdashboard-token-/,/^token/{/^token/s#\S+\s+##p}'
# An additional Cluster Role (and Binding) named anonymous-dashboard-proxy is created here
# so that the system:anonymous user can proxy to the Kubernetes Dashboard through the API Server;
# this RBAC rule only grants access to the services/proxy resource and the https:kubernetes-dashboard: resource name.
# Since version 1.7 the Dashboard no longer ships with full permissions, so a service account bound to the
# cluster-admin role is needed (all of this is already written in dashboard/anonymous-proxy-rbac.yml)
# Ingress Controller
# First, on k8s-m1, run the following to create the Ingress Controller and check that it deployed correctly:
sed -ri 's#\{\{ INGRESS_VIP \}\}#'"${INGRESS_VIP}"'#' ExtraAddons/ingress-controller/ingress-controller-svc.yml
kubectl create ns ingress-nginx
kubectl apply -f ExtraAddons/ingress-controller/
kubectl -n ingress-nginx get po,svc
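# Optional check: once the controller pods are Running, hitting the ingress VIP without a matching
# Host header should return 404 from the default backend.
# curl -s -o /dev/null -w '%{http_code}\n' http://${INGRESS_VIP}/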
# External DNS (very useful off the public cloud when the internal network has no DNS)
# First, on k8s-m1, run the following to create the CoreDNS server and check that it deployed correctly
sed -ri 's#\{\{ INGRESS_VIP \}\}#'"${INGRESS_VIP}"'#' ExtraAddons/external-dns/coredns/coredns-svc-tcp.yml
sed -ri 's#\{\{ INGRESS_VIP \}\}#'"${INGRESS_VIP}"'#' ExtraAddons/external-dns/coredns/coredns-svc-udp.yml
kubectl create ns external-dns
kubectl create -f ExtraAddons/external-dns/coredns/
kubectl -n external-dns get po,svc
# Then deploy ExternalDNS to sync resource records with CoreDNS:
kubectl apply -f ExtraAddons/external-dns/external-dns/
kubectl -n external-dns get po -l k8s-app=external-dns
# Prometheus Operator
# On k8s-m1, create the components Prometheus needs with kubectl
kubectl apply -f ExtraAddons/prometheus/
kubectl apply -f ExtraAddons/prometheus/operator/
# Wait for the operator to come up and create the CRDs before continuing
kubectl apply -f ExtraAddons/prometheus/alertmanater/
kubectl apply -f ExtraAddons/prometheus/node-exporter/
kubectl apply -f ExtraAddons/prometheus/kube-state-metrics/
kubectl apply -f ExtraAddons/prometheus/grafana/
kubectl apply -f ExtraAddons/prometheus/kube-service-discovery/
# It is recommended to run the later steps that fill in the endpoint (ep) information first, then come back here
kubectl apply -f ExtraAddons/prometheus/prometheus/
kubectl apply -f ExtraAddons/prometheus/servicemonitor/
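# Optional check: watch the monitoring components come up; depending on the manifests they may live in
# their own namespace, so search across all namespaces.
kubectl get pod --all-namespaces | grep -E 'prometheus|alertmanager|grafana|kube-state-metrics|node-exporter' || true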