1. 共三台机器,分别为 data1~data3
- 设置节点信息
[root@data1 /]# export NODE_NAMES=(data1 data2 data3)
[root@data1 /]# export NODE_IPS=(192.168.31.7 192.168.31.8 192.168.31.9)
2. 安装gluster源
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "yum install centos-release-gluster -y"
>done
3. 安装glusterfs组件
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel"
>done
可能出现的问题:
Error:
Problem 1: cannot install the best candidate for the job
- nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
Problem 2: package glusterfs-geo-replication-7.6-1.el8.x86_64 requires glusterfs-server = 7.6-1.el8, but none of the providers can be installed
- cannot install the best candidate for the job
- nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)
解决方法:
- 解决pyxattr依赖
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "dnf --enablerepo=PowerTools install -y python3-pyxattr"
>done
- 添加 --nobest 参数
[root@data1 ~]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel --nobest"
>done
4. 修改gluster的工作目录并设置systemctl托管
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "mkdir -p /opt/glusterfs"
> ssh root@${NODE_IP} "sed -i 's#/var/lib/glusterd#/opt/glusterfs#g' /etc/glusterfs/glusterd.vol"
> ssh root@${NODE_IP} "systemctl start glusterd.service"
> ssh root@${NODE_IP} "systemctl enable glusterd.service"
> ssh root@${NODE_IP} "systemctl status glusterd.service | grep active"
>done
192.168.31.7
Active: active (running) since Mon 2020-07-06 03:15:13 UTC; 2h 35min ago
192.168.31.8
Active: active (running) since Mon 2020-07-06 05:50:51 UTC; 583ms ago
192.168.31.9
Active: active (running) since Mon 2020-07-06 05:50:54 UTC; 598ms ago
5. 开放gluster端口
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
> ssh root@${NODE_IP} "iptables -I INPUT -p tcp --dport 24007 -j ACCEPT"
>done
6. 添加节点
[root@data1 /]# gluster peer probe data2
peer probe: success.
[root@data1 /]# gluster peer probe data3
peer probe: success.
[root@data1 opt]# gluster peer status # 查看节点状态
Number of Peers: 2
Hostname: data2
Uuid: d38c773f-01d3-4dcf-87af-b09c62717849
State: Peer in Cluster (Connected)
Hostname: data3
Uuid: 472fa46f-837f-4d5c-a68f-d8cd35c822ea
State: Peer in Cluster (Connected)
如果出现如下错误
[root@data1 /]# gluster peer probe data2
peer probe: failed: Probe returned with Transport endpoint is not connected
可能是因为连接的节点中gluster服务没有启动或者端口没有开放
7. 创建分布式复制卷(replica 3 arbiter 1,即每组 2 份数据副本 + 1 份仲裁,共 6 个 brick)
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]};
>do
> echo ${NODE_IP}
> ssh root@${NODE_IP} "mkdir -p /data3/gfs_data/vol01"
> ssh root@${NODE_IP} "mkdir -p /data4/gfs_data/vol01"
>done
192.168.31.7
192.168.31.8
192.168.31.9
[root@data1 /]#
[root@data1 /]# gluster volume create vol01 replica 3 arbiter 1 transport tcp \
>data1:/data3/gfs_data/vol01 \
>data1:/data4/gfs_data/vol01 \
>data2:/data3/gfs_data/vol01 \
>data2:/data4/gfs_data/vol01 \
>data3:/data3/gfs_data/vol01 \
>data3:/data4/gfs_data/vol01 force
volume create: vol01: success: please start the volume to access data
[root@data1 /]#
[root@data1 /]# gluster volume start vol01
volume start: vol01: success
[root@data1 /]#
[root@data1 opt]# gluster volume info
Volume Name: vol01
Type: Distributed-Replicate
Volume ID: c34302b9-bf08-467c-8118-019a53a2e321
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x (2 + 1) = 6
Transport-type: tcp
Bricks:
Brick1: data1:/data3/gfs_data/vol01
Brick2: data1:/data4/gfs_data/vol01
Brick3: data2:/data3/gfs_data/vol01 (arbiter)
Brick4: data2:/data4/gfs_data/vol01
Brick5: data3:/data3/gfs_data/vol01
Brick6: data3:/data4/gfs_data/vol01 (arbiter)
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@data1 /]#
[root@data1 vol01]# gluster volume status vol01
Status of volume: vol01
Gluster process TCP Port RDMA Port Online Pid
------------------------------------------------------------------------------
Brick data1:/data3/gfs_data/vol01 49152 0 Y 78025
Brick data1:/data4/gfs_data/vol01 49153 0 Y 78045
Brick data2:/data3/gfs_data/vol01 49152 0 Y 34425
Brick data2:/data4/gfs_data/vol01 49153 0 Y 34445
Brick data3:/data3/gfs_data/vol01 49152 0 Y 15601
Brick data3:/data4/gfs_data/vol01 49153 0 Y 15621
Self-heal Daemon on localhost N/A N/A Y 78066
Self-heal Daemon on data3 N/A N/A Y 15642
Self-heal Daemon on data2 N/A N/A Y 34466
Task Status of Volume vol01
------------------------------------------------------------------------------
There are no active volume tasks
如果出现如下错误:
volume create: vol01: failed: parent directory /data3/gfs_data is already part of a volume
说明之前曾将 gfs_data 路径用作某个卷的 brick,需要删除旧的配置文件和扩展属性
[root@data1 gfs_data]# ls -a
. .. .glusterfs
[root@data1 gfs_data]# rm -rf .glusterfs/
[root@data1 gfs_data]# setfattr -x trusted.glusterfs.volume-id /data3/gfs_data/
[root@data1 gfs_data]# setfattr -x trusted.gfid /data3/gfs_data/ # 如果旧的卷创建失败,这里会找不到文件
setfattr: /data3/gfs_data/: No such attribute
关闭 ctime(一致性时间戳)特性
gluster volume set vol01 ctime off