部署环境
Host | IP | k8s 版本 | glusterFS版本 | heketi版本 | heketi-client 版本 |
---|---|---|---|---|---|
k8s-master1 | 192.168.10.1 | 1.20.0 | 9.5-1.el7 | heketi-8.0.0-1 | heketi-client-8.0.0-1 |
k8s-master2 | 192.168.10.2 | 1.20.0 | 9.5-1.el7 | heketi-8.0.0-1 | heketi-client-8.0.0-1 |
k8s-master3 | 192.168.10.3 | 1.20.0 | 9.5-1.el7 | heketi-8.0.0-1 | heketi-client-8.0.0-1 |
前置条件: 每个用于 GFS 数据存储的磁盘必须是块设备(如 /dev/sdb、/dev/sdb1),不能是目录。这部分需要提前规划。
部署 glusterFS 集群
hosts信息
# /etc/hosts
192.168.10.1 k8s-master1
192.168.10.2 k8s-master2
192.168.10.3 k8s-master3
$ ntpdate time.windows.com
$ yum -y install centos-release-gluster9.noarch
$ yum -y install glusterfs glusterfs-fuse glusterfs-server
配置服务和集群
$ systemctl enable glusterd
$ systemctl start glusterd
$ gluster peer probe k8s-master1
peer probe: success. Probe on localhost not needed
$ gluster peer probe k8s-master2
peer probe: success.
$ gluster peer probe k8s-master3
peer probe: success.
$ gluster peer status
Number of Peers: 2
Hostname: k8s-master2
Uuid: c08a504c-4a92-403b-ae86-48e51b1efd05
State: Peer in Cluster (Connected)
Hostname: k8s-master3
Uuid: 9bbc907a-3f60-49fd-9911-9cfed9bb0844
State: Peer in Cluster (Connected)
部署GlusterFS管理客户端heketi
节点初始化
adduser heketi
passwd heketi
chmod u+w /etc/sudoers
编辑 /etc/sudoers,加入下面这一行(更推荐直接使用 visudo 编辑,可做语法校验):
heketi ALL=(ALL) NOPASSWD: ALL
chmod u-w /etc/sudoers
su - heketi
ssh-keygen -t rsa -b 4096
ssh-copy-id -i heketi@k8s-master1
ssh-copy-id -i heketi@k8s-master2
ssh-copy-id -i heketi@k8s-master3
安装heketi
yum install -y heketi-client
yum install -y heketi
cd /etc/heketi/
cp heketi.json heketi.json_bak
heketi.json
{
"_port_comment": "Heketi Server Port Number",
"port": "18080",
"_use_auth": "Enable JWT authorization. Please enable for deployment",
"use_auth": true,
"_jwt": "Private keys for access",
"jwt": {
"_admin": "Admin has access to all APIs",
"admin": {
"key": "admin" ## 与后面 heketi-cli 的 --secret 及 StorageClass 的 restuserkey 保持一致;注意 JSON 不支持注释,实际文件中需删除本行注释
},
"_user": "User only has access to /volumes endpoint",
"user": {
"key": "admin"
}
},
"_glusterfs_comment": "GlusterFS Configuration",
"glusterfs": {
"_executor_comment": [
"Execute plugin. Possible choices: mock, ssh",
"mock: This setting is used for testing and development.",
" It will not send commands to any node.",
"ssh: This setting will notify Heketi to ssh to the nodes.",
" It will need the values in sshexec to be configured.",
"kubernetes: Communicate with GlusterFS containers over",
" Kubernetes exec api."
],
"executor": "ssh",
"_sshexec_comment": "SSH username and private key file information",
"sshexec": {
"keyfile": "/home/heketi/.ssh/id_rsa",
"user": "heketi",
"port": "22",
"sudo": true,
"fstab": "/etc/fstab"
},
"_kubeexec_comment": "Kubernetes configuration",
"kubeexec": {
"host" :"https://kubernetes.host:8443",
"cert" : "/path/to/crt.file",
"insecure": false,
"user": "kubernetes username",
"password": "password for kubernetes user",
"namespace": "OpenShift project or Kubernetes namespace",
"fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
},
"_db_comment": "Database file name",
"db": "/var/lib/heketi/heketi.db",
"_loglevel_comment": [
"Set log level. Choices are:",
" none, critical, error, warning, info, debug",
"Default is warning"
],
"loglevel" : "debug"
}
}
systemctl enable heketi
systemctl start heketi
systemctl status heketi
初始化GFS 复制卷集群
复制卷:所有组成卷的服务器中存放的内容都完全相同,类似RAID1。
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json cluster create
{"id":"60d0c41c0b232906f90b528fbb58400a","nodes":[],"volumes":[],"block":true,"file":true,"blockvolumes":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.1 --storage-host-name 192.168.10.1 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.1"],"storage":["192.168.10.1"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"918f73f5e17662abbcd0552445a53766","state":"online","devices":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.2 --storage-host-name 192.168.10.2 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.2"],"storage":["192.168.10.2"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"46e7b325ce20528bf8160d8c65a3c652","state":"online","devices":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.3 --storage-host-name 192.168.10.3 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.3"],"storage":["192.168.10.3"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"524481c119e6295117979bb80046f9c8","state":"online","devices":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node list
Id:46e7b325ce20528bf8160d8c65a3c652 Cluster:60d0c41c0b232906f90b528fbb58400a
Id:524481c119e6295117979bb80046f9c8 Cluster:60d0c41c0b232906f90b528fbb58400a
Id:918f73f5e17662abbcd0552445a53766 Cluster:60d0c41c0b232906f90b528fbb58400a
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 46e7b325ce20528bf8160d8c65a3c652
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 524481c119e6295117979bb80046f9c8
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 918f73f5e17662abbcd0552445a53766
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 topology info
Cluster Id: 60d0c41c0b232906f90b528fbb58400a
File: true
Block: true
Volumes:
Name: vol_af2533d49e30b106c7c1181807c93ad1
Size: 10
Id: af2533d49e30b106c7c1181807c93ad1
Cluster Id: 60d0c41c0b232906f90b528fbb58400a
Mount: 192.168.10.2:vol_af2533d49e30b106c7c1181807c93ad1
Mount Options: backup-volfile-servers=192.168.10.3,192.168.10.1
Durability Type: replicate
Replica: 3
Snapshot: Enabled
Snapshot Factor: 1.00
Bricks:
Id: 3dbf6dd1f57f119054c583489a29242d
Path: /var/lib/heketi/mounts/vg_a161942c4ea242e92cb753a18abc0483/brick_3dbf6dd1f57f119054c583489a29242d/brick
Size (GiB): 10
Node: 524481c119e6295117979bb80046f9c8
Device: a161942c4ea242e92cb753a18abc0483
Id: 90dc64eea016285370a4df707f235371
Path: /var/lib/heketi/mounts/vg_04041ffb900f9e8a2d73cdd9a32ed62a/brick_90dc64eea016285370a4df707f235371/brick
Size (GiB): 10
Node: 46e7b325ce20528bf8160d8c65a3c652
Device: 04041ffb900f9e8a2d73cdd9a32ed62a
Id: bf602a9741293e6f1cd5102a385f87d5
Path: /var/lib/heketi/mounts/vg_c0307d49fd4e8b3a5bd2b4ed48c4dcd9/brick_bf602a9741293e6f1cd5102a385f87d5/brick
Size (GiB): 10
Node: 918f73f5e17662abbcd0552445a53766
Device: c0307d49fd4e8b3a5bd2b4ed48c4dcd9
Nodes:
Node Id: 46e7b325ce20528bf8160d8c65a3c652
State: online
Cluster Id: 60d0c41c0b232906f90b528fbb58400a
Zone: 1
Management Hostnames: 192.168.10.2
Storage Hostnames: 192.168.10.2
Devices:
Id:04041ffb900f9e8a2d73cdd9a32ed62a Name:/dev/sdb State:online Size (GiB):99 Used (GiB):10 Free (GiB):89
Bricks:
Id:90dc64eea016285370a4df707f235371 Size (GiB):10 Path: /var/lib/heketi/mounts/vg_04041ffb900f9e8a2d73cdd9a32ed62a/brick_90dc64eea016285370a4df707f235371/brick
Node Id: 524481c119e6295117979bb80046f9c8
State: online
Cluster Id: 60d0c41c0b232906f90b528fbb58400a
Zone: 1
Management Hostnames: 192.168.10.3
Storage Hostnames: 192.168.10.3
Devices:
Id:a161942c4ea242e92cb753a18abc0483 Name:/dev/sdb State:online Size (GiB):99 Used (GiB):10 Free (GiB):89
Bricks:
Id:3dbf6dd1f57f119054c583489a29242d Size (GiB):10 Path: /var/lib/heketi/mounts/vg_a161942c4ea242e92cb753a18abc0483/brick_3dbf6dd1f57f119054c583489a29242d/brick
Node Id: 918f73f5e17662abbcd0552445a53766
State: online
Cluster Id: 60d0c41c0b232906f90b528fbb58400a
Zone: 1
Management Hostnames: 192.168.10.1
Storage Hostnames: 192.168.10.1
Devices:
Id:c0307d49fd4e8b3a5bd2b4ed48c4dcd9 Name:/dev/sdb State:online Size (GiB):99 Used (GiB):10 Free (GiB):89
Bricks:
Id:bf602a9741293e6f1cd5102a385f87d5 Size (GiB):10 Path: /var/lib/heketi/mounts/vg_c0307d49fd4e8b3a5bd2b4ed48c4dcd9/brick_bf602a9741293e6f1cd5102a385f87d5/brick
GlusterFS 在 kubernetes 集群的应用
StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Retain
parameters:
resturl: "http://192.168.10.1:18080"
restauthenabled: "true"
restuser: "admin"
restuserkey: "admin"
gidMin: "2000"
volumetype: "replicate:3"
clusterid: "60d0c41c0b232906f90b528fbb58400a"
allowVolumeExpansion: true
test
kind: Deployment
apiVersion: apps/v1
metadata:
name: demo-mode3-nginx
labels:
name: demo-mode3-nginx
spec:
replicas: 1
selector:
matchLabels:
name: demo-mode3-nginx
template:
metadata:
labels:
name: demo-mode3-nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
volumeMounts:
- name: demo-mode3-nginx-vol
mountPath: "/usr/share/nginx/html"
volumes:
- name: demo-mode3-nginx-vol
persistentVolumeClaim:
claimName: glusterfs-vol-pvc02
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: glusterfs-vol-pvc02
namespace: default
spec:
storageClassName: gluster-heketi-storageclass
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
$ kubectl get pvc,pv |grep glusterfs-vol-pvc02
persistentvolumeclaim/glusterfs-vol-pvc02 Bound pvc-7623c393-0f56-40cc-ae86-ab71c9900895 10Gi RWX gluster-heketi-storageclass 11m
persistentvolume/pvc-7623c393-0f56-40cc-ae86-ab71c9900895 10Gi RWX Retain Bound default/glusterfs-vol-pvc02 gluster-heketi-storageclass 9m50s
$ kubectl get po |grep demo-mode3-nginx
demo-mode3-nginx-68df46746d-qhh4g 1/1 Running 0 12m
pod、pvc 的状态不为 Pending 即正常。