When deploying a Kubernetes cluster with kubespray and no external load balancer, kubespray by default gives worker nodes an HA path to the apiserver through a local nginx or haproxy proxy. On the master nodes, however, the local apiserver is accessed directly as 127.0.0.1:6443, and applications calling the apiserver get no HA either. The traditional fix is to stand up keepalived plus haproxy; this post uses kube-vip instead.
The official kube-vip documentation on the HA architecture:
https://github.com/kube-vip/kube-vip/blob/main/kubernetes-control-plane.md
https://kube-vip.io/architecture/
I first tried the official approach at https://kube-vip.io/install_static/ and it failed every time: the manifest generated by docker run --network host --rm ghcr.io/kube-vip/kube-vip:v0.3.8 manifest pod defaults to using /etc/kubernetes/admin.conf as its credential, but kubespray also relies on admin.conf for authentication while initializing the cluster, which creates a chicken-and-egg deadlock.
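For reference, the official static-pod flow looks roughly like this (the VIP and interface values here are placeholders):

export VIP=172.20.48.200    # placeholder VIP
export INTERFACE=eth0       # placeholder NIC

# the generated manifest mounts /etc/kubernetes/admin.conf for leader election,
# which kubespray has not created yet at this point
docker run --network host --rm ghcr.io/kube-vip/kube-vip:v0.3.8 manifest pod \
  --interface $INTERFACE \
  --vip $VIP \
  --controlplane \
  --services \
  --arp \
  --leaderElection | sudo tee /etc/kubernetes/manifests/kube-vip.yaml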
I eventually got it working by writing the configuration file directly. References:
https://github.com/kube-vip/kube-vip/blob/main/kubernetes-control-plane.md
https://www.codeleading.com/article/58065570523/
The Ansible role is as follows.
Note: templates/kube-vip.yaml.j2 was generated with:
docker run --network host --rm 172.20.48.169:81/yks/kube-vip:v0.3.9 sample config | sudo tee /etc/kubernetes/manifests/kube-vip.yaml
kubespray's default apiserver listening port is 6443 (variable: kube_apiserver_port). Here the VIP is bound to the domain k8s.apiserver.io and kube-vip listens on port 8443 (variable: loadbalancer_apiserver.port).
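For context, the relevant settings in the kubespray inventory would look something like this (the VIP address is a placeholder):

# inventory/mycluster/group_vars/all/all.yml -- sketch, address is a placeholder
apiserver_loadbalancer_domain_name: "k8s.apiserver.io"
loadbalancer_apiserver:
  address: 172.20.48.200
  port: 8443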
tasks/main.yml:
---
- name: kube-vip | Create addon dir
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: 0755
    recurse: yes
  with_items:
    - "{{ kube_config_dir }}/manifests"
    - "/etc/kube-vip"

- name: kube-vip | set facts for kube-vip
  set_fact:
    interface: "{{ ansible_default_ipv4.interface | default('eth0') }}"
    vip: "{{ loadbalancer_apiserver.address }}"
    port: "{{ loadbalancer_apiserver.port | default(6443) }}"
  when:
    - container_manager == "docker"

- name: kube-vip | Create config
  template:
    src: "config.yaml.j2"
    dest: "/etc/kube-vip/config.yaml"

- name: kube-vip | Create manifests
  template:
    src: "kube-vip.yaml.j2"
    dest: "{{ kube_config_dir }}/manifests/kube-vip.yaml"

# - name: kube-vip | Create manifests for kube-vip
#   shell: >-
#     {{ kube_vip }} manifest pod \
#       --interface {{ interface }} \
#       --vip {{ vip }} \
#       --controlplane \
#       --services \
#       --arp \
#       --port {{ port }} \
#       --leaderElection | tee {{ kube_config_dir }}/manifests/kube-vip.yaml

# - name: kube-vip | Wait for kube-vip running
#   shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace kube-system | grep ^kube-vip"
#   args:
#     executable: /bin/bash
#   register: kubevip_pod
#   until: kubevip_pod.stdout.find('Running') != -1
#   retries: 15
#   delay: 5
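A minimal play to apply this role to the control plane nodes might look like the following (assuming the role directory is named kube-vip):

# kube-vip.yml -- sketch; the role name is an assumption
- hosts: kube_control_plane
  become: yes
  roles:
    - kube-vip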
templates/config.yaml.j2:
localPeer:
  id: {{ inventory_hostname }}
  address: {{ kube_apiserver_access_address }}
  port: 20000
remotePeers:
{% for host in groups['kube_control_plane'] | difference([inventory_hostname]) %}
  - id: {{ host }}
    address: {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}
    port: 20000
{% endfor %}
vip: {{ loadbalancer_apiserver.address }}
gratuitousARP: true
singleNode: false
# the first node in kube_control_plane starts as leader, the rest as followers
startAsLeader: {{ 'true' if inventory_hostname == groups['kube_control_plane'][0] else 'false' }}
interface: "{{ interface }}"
loadBalancers:
  - name: API Server Load Balancer
    type: tcp
    port: {{ port }}
    bindToVip: true   # true: listen on the VIP only; false: listen on all interfaces
    backends:
{% for host in groups['kube_control_plane'] %}
      - port: {{ kube_apiserver_port }}
        address: {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}
{% endfor %}
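To make the template concrete, here is roughly what /etc/kube-vip/config.yaml would render to on the first of three hypothetical masters (master1/master2/master3 at 172.20.48.1-3, VIP 172.20.48.200; all names and addresses are invented for illustration):

localPeer:
  id: master1
  address: 172.20.48.1
  port: 20000
remotePeers:
  - id: master2
    address: 172.20.48.2
    port: 20000
  - id: master3
    address: 172.20.48.3
    port: 20000
vip: 172.20.48.200
gratuitousARP: true
singleNode: false
startAsLeader: true   # master1 is first in kube_control_plane
interface: "eth0"
loadBalancers:
  - name: API Server Load Balancer
    type: tcp
    port: 8443
    bindToVip: true
    backends:
      - port: 6443
        address: 172.20.48.1
      - port: 6443
        address: 172.20.48.2
      - port: 6443
        address: 172.20.48.3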
templates/kube-vip.yaml.j2:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - start
    - -c
    - /etc/kube-vip/config.yaml
    image: {{ kubevip_image_repo }}:{{ kubevip_image_tag }}
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - SYS_TIME
    volumeMounts:
    - mountPath: /etc/kube-vip/
      name: config
  hostNetwork: true
  volumes:
  - hostPath:
      path: /etc/kube-vip/
    name: config
status: {}
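After the kubelets pick up the static pod, the setup can be checked roughly like this (interface, VIP, and domain as configured above; adjust to your environment):

# kube-vip static pods should be Running on every control plane node
kubectl -n kube-system get pods -o wide | grep kube-vip

# on the current leader, the VIP should be bound to the chosen interface
ip addr show eth0 | grep 172.20.48.200

# the apiserver should answer through the VIP on the load balancer port
curl -k https://k8s.apiserver.io:8443/version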