# KubeSphere

# 1. High-Availability Cluster Installation

Official documentation: Set up an HA Kubernetes Cluster Using Keepalived and HAproxy

# 3.1 Prepare Hosts

| IP Address | Hostname | Role |
| --- | --- | --- |
| 192.168.50.26 | k8s-lb1 | Keepalived & HAproxy |
| 192.168.50.233 | k8s-lb2 | Keepalived & HAproxy |
| 192.168.50.189 | k8s-control1 | control-plane, etcd |
| 192.168.50.143 | k8s-control2 | control-plane, etcd |
| 192.168.50.20 | k8s-control3 | control-plane, etcd |
| 192.168.50.164 | k8s-worker1 | worker |
| 192.168.50.176 | k8s-worker2 | worker |
| 192.168.50.247 | k8s-worker3 | worker |
| 192.168.50.222 | | VIP |

# 3.2 Configure the Load Balancers

Apply the same configuration to both load-balancer machines (k8s-lb1 and k8s-lb2).

# HAproxy

# Install dependencies
tpxcer@k8s-lb1:~$ sudo apt install keepalived haproxy psmisc -y

# HAproxy configuration
tpxcer@k8s-lb1:~$ sudo vi /etc/haproxy/haproxy.cfg

# The following is an example configuration for reference (note the server fields; remember that 6443 is the apiserver port):
global
    log /dev/log  local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
  log global
  option  httplog
  option  dontlognull
  timeout connect 5000
  timeout client 50000
  timeout server 50000

frontend kube-apiserver
  bind *:6443
  mode tcp
  option tcplog
  default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server k8s-control1 192.168.50.189:6443 check
    server k8s-control2 192.168.50.143:6443 check
    server k8s-control3 192.168.50.20:6443 check

# Save the file and run the following command to restart HAproxy.
tpxcer@k8s-lb1:~$ sudo systemctl restart haproxy

# Enable HAproxy to start automatically at boot
tpxcer@k8s-lb1:~$ sudo systemctl enable haproxy
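
To confirm that HAproxy came up correctly, a quick check (a sketch, assuming the configuration above) is to verify the service state and that it is listening on the apiserver port:

# Check the service state and the listening port
tpxcer@k8s-lb1:~$ sudo systemctl status haproxy
tpxcer@k8s-lb1:~$ sudo ss -tlnp | grep 6443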

# Keepalived

# Edit the Keepalived configuration on both machines
tpxcer@k8s-lb1:~$ sudo vi /etc/keepalived/keepalived.conf

# The commented fields below must be adapted to each machine
# k8s-lb1 configuration
global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state BACKUP
  priority 100
  interface ens192                       # Network card
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111
  }
  unicast_src_ip 192.168.50.26      # The IP address of this machine
  unicast_peer {
    192.168.50.233                         # The IP address of peer machines
  }

  virtual_ipaddress {
    192.168.50.222/24                  # The VIP address
  }
  
  track_script {
    chk_haproxy
  }
}
# k8s-lb2 configuration
global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state BACKUP
  priority 100
  interface ens192                       # Network card
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111
  }
  unicast_src_ip 192.168.50.233      # The IP address of this machine
  unicast_peer {
    192.168.50.26                         # The IP address of peer machines
  }

  virtual_ipaddress {
    192.168.50.222/24                  # The VIP address
  }
  
  track_script {
    chk_haproxy
  }
}

# Save the file and run the following command to restart Keepalived.
tpxcer@k8s-lb1:~$ sudo systemctl restart keepalived

# Enable Keepalived to start automatically at boot:
tpxcer@k8s-lb1:~$ sudo systemctl enable keepalived
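
Keepalived elects one MASTER via VRRP. To see which node currently holds that role and the VIP (a sketch, assuming the interface name ens192 from the configuration above):

# Follow VRRP state transitions (look for "Entering MASTER STATE")
tpxcer@k8s-lb1:~$ sudo journalctl -u keepalived -f
# The VIP 192.168.50.222 should appear on exactly one node
tpxcer@k8s-lb1:~$ ip a s ens192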

# Verify High Availability

# On machine lb1, run the following command
tpxcer@k8s-lb1:~$ ip a s
2: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:25:d2:2c brd ff:ff:ff:ff:ff:ff
    altname enp2s2
    inet 192.168.50.235/24 metric 100 brd 192.168.50.255 scope global dynamic ens34
       valid_lft 84596sec preferred_lft 84596sec
    inet 192.168.50.233/24 scope global secondary ens34
       valid_lft forever preferred_lft forever
    inet6 240e:388:6d08:bf00:20c:29ff:fe25:d22c/64 scope global dynamic mngtmpaddr noprefixroute
       valid_lft 600sec preferred_lft 600sec
    inet6 fe80::20c:29ff:fe25:d22c/64 scope link
       valid_lft forever preferred_lft forever
       
# The virtual IP address has been added successfully. Simulate a failure on this node:
tpxcer@k8s-lb1:~$ sudo systemctl stop haproxy

# Check the floating IP address again; you can see that it has disappeared from lb1.
tpxcer@k8s-lb1:~$ ip a s
2: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:25:d2:2c brd ff:ff:ff:ff:ff:ff
    altname enp2s2
    inet 192.168.50.235/24 metric 100 brd 192.168.50.255 scope global dynamic ens34
       valid_lft 84456sec preferred_lft 84456sec
    inet6 240e:388:6d08:bf00:20c:29ff:fe25:d22c/64 scope global dynamic mngtmpaddr noprefixroute
       valid_lft 596sec preferred_lft 596sec
    inet6 fe80::20c:29ff:fe25:d22c/64 scope link
       valid_lft forever preferred_lft forever
       
# If the configuration works, the virtual IP fails over to the other machine (lb2). Run the following command on lb2; this is the expected result:
tpxcer@k8s-lb2:~$ ip a s
2: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:98:e7:c6 brd ff:ff:ff:ff:ff:ff
    altname enp2s2
    inet 192.168.50.15/24 metric 100 brd 192.168.50.255 scope global dynamic ens34
       valid_lft 84444sec preferred_lft 84444sec
    inet 192.168.50.233/24 scope global secondary ens34
       valid_lft forever preferred_lft forever
    inet6 240e:388:6d08:bf00:20c:29ff:fe98:e7c6/64 scope global dynamic mngtmpaddr noprefixroute
       valid_lft 595sec preferred_lft 595sec
    inet6 fe80::20c:29ff:fe98:e7c6/64 scope link
       valid_lft forever preferred_lft forever
       
# As shown above, high availability has been configured successfully.
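
To finish the test, restart HAproxy on lb1. Since both nodes are configured with priority 100, whether the VIP fails back depends on the VRRP tiebreak, so simply confirm that exactly one node holds it (a sketch using the configured VIP):

tpxcer@k8s-lb1:~$ sudo systemctl start haproxy
# Check which node currently holds the VIP
tpxcer@k8s-lb1:~$ ip a s | grep 192.168.50.222
tpxcer@k8s-lb2:~$ ip a s | grep 192.168.50.222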

# 3.3 Install the Cluster

# Download kk
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.12 sh -
# List the supported Kubernetes versions
./kk version --show-supported-k8s
# Create the configuration file
./kk create config --with-kubesphere v3.4.0 --with-kubernetes v1.26.5
# Modify the relevant configuration
spec:
  hosts:
  - {name: k8s-control1, address: 192.168.50.189, internalAddress: 192.168.50.189, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-control2, address: 192.168.50.143, internalAddress: 192.168.50.143, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-control3, address: 192.168.50.20, internalAddress: 192.168.50.20, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-worker1, address: 192.168.50.164, internalAddress: 192.168.50.164, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-worker2, address: 192.168.50.176, internalAddress: 192.168.50.176, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-worker3, address: 192.168.50.247, internalAddress: 192.168.50.247, privateKeyPath: "~/.ssh/id_rsa"}
  roleGroups:
    etcd:
    - k8s-control1
    - k8s-control2
    - k8s-control3
    control-plane: 
    - k8s-control1
    - k8s-control2
    - k8s-control3
    worker:
    - k8s-worker1
    - k8s-worker2
    - k8s-worker3
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers 
    # internalLoadbalancer: haproxy

    domain: lb.kubesphere.local
    address: 192.168.50.222
    port: 6443
# Start the installation
./kk create cluster -f config-sample.yaml
# Tail the installer logs
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
#####################################################
###              Welcome to KubeSphere!           ###
#####################################################

Console: http://192.168.50.189:30880
Account: admin
Password: P@88w0rd
NOTES:
# Copy the kubeconfig so the cluster can be accessed from a non-root account
root@k8s-control1:~# cp -R .kube /home/tpxcer/
tpxcer@k8s-control1:~$ sudo chown tpxcer:tpxcer -R .kube/
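
As a sanity check, the copied kubeconfig should now let the non-root account query the cluster:

tpxcer@k8s-control1:~$ kubectl get nodes
tpxcer@k8s-control1:~$ kubectl get pods -n kubesphere-system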

# 4. Deploying Middleware Applications on Kubernetes with KubeSphere

# 1. Deploy MySQL

See the separate MySQL deployment guide.

# 2. Deploy Redis

1. Create the configuration (ConfigMap)
# name
redis6-conf

# key
redis.conf

# value
appendonly yes
port 6379
bind 0.0.0.0
2. Create the StatefulSet
# Name
redis6

#Image
redis:6

# start command
## Command
redis-server
## Parameters
/etc/redis/redis.conf

# Check 'Synchronize Host Timezone'

# Volumes
## Volume Name
redis6-pvc
## access mode
Read and write
## Mount path
/data

# Configmap
## access mode
Read-only
## Mount path
/etc/redis
3. Configure networking (see the connectivity check after the settings below)
# Name
redis6-node

# Internal Access Mode
Virtual IP Address

# Ports
6379

# External Access
NodePort
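
After the Service is created, the Redis instance can be smoke-tested through the NodePort from outside the cluster (a sketch; <node-ip> and <node-port> are placeholders for the values KubeSphere assigns):

# Requires redis-cli on the client machine; substitute real values
redis-cli -h <node-ip> -p <node-port> ping
# Expected reply: PONG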

# 3. Deploy ElasticSearch

1. Start the ES container (a health check follows the commands below)
# Create the data directory
mkdir -p /mydata/es-01 && chmod 777 -R /mydata/es-01

# Start the container
docker run --restart=always -d -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
-v es-config:/usr/share/elasticsearch/config \
-v /mydata/es-01/data:/usr/share/elasticsearch/data \
--name es-01 \
elasticsearch:7.13.4

docker ps |grep es-01
docker exec -it es-01 /bin/bash
docker rm -f es-01
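
Before extracting configuration from this container, it is worth confirming that it came up healthy (a sketch, assuming the port mappings above):

# Elasticsearch can take a short while to start accepting requests
curl -s http://localhost:9200
curl -s "http://localhost:9200/_cluster/health?pretty"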
2. ES configuration

Two files are needed: elasticsearch.yml and jvm.options.
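
One way to obtain sensible contents for these two files is to copy the defaults out of the temporary es-01 container started above and use them as the ConfigMap values (a sketch; the paths follow the official elasticsearch image layout):

# Copy the default configuration files out of the container
docker cp es-01:/usr/share/elasticsearch/config/elasticsearch.yml .
docker cp es-01:/usr/share/elasticsearch/config/jvm.options .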

# 4. Deploy Nacos

1. Configuration

Two files: application.properties and cluster.conf.

The configuration file path is /home/nacos/conf.
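
For a clustered deployment, cluster.conf lists one member address per line. A minimal sketch, assuming a three-replica StatefulSet named nacos fronted by a headless Service of the same name in the default namespace (these pod DNS names are hypothetical):

# /home/nacos/conf/cluster.conf (hypothetical pod DNS names)
nacos-0.nacos.default.svc.cluster.local:8848
nacos-1.nacos.default.svc.cluster.local:8848
nacos-2.nacos.default.svc.cluster.local:8848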

2. External network access (see the usage check after the manifest below)
kind: Service
apiVersion: v1
metadata:
  name: nacos-node
  namespace: default
  labels:
    app: nacos-node
  annotations:
    kubesphere.io/creator: admin
spec:
  ports:
    - name: http-8848
      protocol: TCP
      port: 8848
      targetPort: 8848
      nodePort: 31307
  selector:
    app: nacos
  clusterIP: 10.233.2.176
  clusterIPs:
    - 10.233.2.176
  type: NodePort
  sessionAffinity: None
  externalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
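
With this Service applied, the Nacos console should be reachable through any node on the NodePort (a sketch; substitute a real node IP, e.g. one of the workers from the host table above):

# Expect an HTTP 200 from the console page
curl -I http://192.168.50.164:31307/nacos/index.html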

# 5. RuoYi-Cloud

Dockerfile

FROM openjdk:8-jdk
LABEL maintainer=leifengyang


#docker run -e PARAMS="--server.port 9090"
ENV PARAMS="--server.port=8080 --spring.profiles.active=prod --spring.cloud.nacos.discovery.server-addr=nacos-lth4.default:8848 --spring.cloud.nacos.config.server-addr=nacos-lth4.default:8848 --spring.cloud.nacos.config.namespace=prod --spring.cloud.nacos.config.file-extension=yml"
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo 'Asia/Shanghai' >/etc/timezone

COPY target/*.jar /app.jar
EXPOSE 8080

#
ENTRYPOINT ["/bin/sh","-c","java -Dfile.encoding=utf8 -Djava.security.egd=file:/dev/./urandom -jar app.jar ${PARAMS}"]

Build the image

docker build -t ruoyi-auth:v1.0 -f Dockerfile .
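
The image can be smoke-tested locally before pushing; the Dockerfile's PARAMS variable lets the port and profile be overridden at run time (a sketch; the service will log Nacos connection errors unless a registry is reachable):

# Run the image locally, overriding PARAMS as hinted in the Dockerfile
docker run -d --name ruoyi-auth -p 9090:9090 -e PARAMS="--server.port=9090" ruoyi-auth:v1.0
docker logs -f ruoyi-auth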

Push the image

- Activate Alibaba Cloud Container Registry (Personal Edition)
- Create a namespace (lfy_ruoyi) to store the images
- Push the images to the Alibaba Cloud registry:
$ docker login --username=forsum**** registry.cn-hangzhou.aliyuncs.com

# Re-tag the local image so its name matches the Alibaba Cloud naming convention.
$ docker tag [ImageId] registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/[image-name]:[image-version]
## docker tag 461955fe1e57 registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-visual-monitor:v1

$ docker push registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/[image-name]:[image-version]
## docker push registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-visual-monitor:v1

# 5. DevOps
