很多学习运维的朋友对 GlusterFS 集群的安装过程不太清楚,下面这篇文章就带大家一步步完成 GlusterFS 集群的搭建。
环境准备
系统
-
1 [root@VM_0_9_centos ~]# uname -a
-
2 Linux VM_0_9_centos 3.10.0-957.el7.x86_64 #1 SMP Thu Nov 8 23:39:32 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
-
3 [root@VM_0_9_centos ~]# cat /etc/redhat-release
-
4 CentOS Linux release 7.6.1810 (Core)
硬盘:除 50G 系统盘(/dev/vda)外,另挂载一块 50G 数据盘(/dev/vdb)用于存储
-
1 [root@slave-09 ~]# lsblk
-
2 NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
-
3 sr0 11:0 1 37.7M 0 rom
-
4 vda 253:0 0 50G 0 disk
-
5 └─vda1 253:1 0 50G 0 part /
-
6 vdb 253:16 0 50G 0 disk
-
7 ├─vdb1 253:17 0 45G 0 part
-
8 └─vdb2 253:18 0 5G 0 part
节点信息
| ip          | 主机名    |
| ----------- | --------- |
| 172.17.0.9  | slave-09  |
| 172.17.0.12 | master-12 |
| 172.17.0.8  | slave-08  |
格式化磁盘(采用 xfs 文件系统)
-
1 mkfs.xfs -f -i size=512 /dev/vdb
-
2 mkdir -p /data/gcluster
-
3 echo '/dev/vdb /data/gcluster xfs defaults 1 2' >> /etc/fstab
-
4 mount -a && mount
-
5
-
6 [root@master-12 ~]# mkfs.xfs -f -i size=512 /dev/vdb
-
7 meta-data=/dev/vdb isize=512 agcount=4, agsize=3276800 blks
-
8 = sectsz=512 attr=2, projid32bit=1
-
9 = crc=1 finobt=0, sparse=0
-
10 data = bsize=4096 blocks=13107200, imaxpct=25
-
11 naming =version 2 bsize=4096 ascii-ci=0 ftype=1
-
12 log =internal log bsize=4096 blocks=6400, version=2
-
13 = sectsz=512 sunit=0 blks, lazy-count=1
-
14 realtime =none extsz=4096 blocks=0, rtextents=0
-
15
-
16
-
17 [root@slave-09 ~]# lsblk
-
18 NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
-
19 sr0 11:0 1 37.7M 0 rom
-
20 vda 253:0 0 50G 0 disk
-
21 └─vda1 253:1 0 50G 0 part /
-
22 vdb 253:16 0 50G 0 disk /data/gcluster
-
23
-
24
安装 GlusterFS
-
1 yum install -y centos-release-gluster
-
2 yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
-
3
-
4 [root@slave-09 ~]# systemctl start glusterd.service
-
5 [root@slave-09 ~]# systemctl status glusterd.service
加入集群
-
1 [root@master-12 ~]# gluster peer probe master-12
-
2 peer probe: success. Probe on localhost not needed
-
3 [root@master-12 ~]#
-
4 [root@master-12 ~]# gluster peer probe slave-09
-
5 peer probe: success.
-
6 [root@master-12 ~]# gluster peer probe slave-08
查看集群状态
-
1 [root@master-12 ~]# gluster peer status
-
2 Number of Peers: 2
-
3
-
4
-
5 Hostname: slave-09
-
6 Uuid: 28815fe9-ce7f-4ed3-ae94-ef7d032b6854
-
7 State: Peer in Cluster (Connected)
-
8
-
9 Hostname: slave-08
-
10 Uuid: ef857dbe-7801-4f26-aa0e-95b8f98e1c64
-
11 State: Peer in Cluster (Connected)
创建 gluster volume
在每个节点上创建volume目录
1- mkdir -p /data/gcluster/data
2- gluster volume create gv0 replica 3 master-12:/data/gcluster/data slave-09:/data/gcluster/data slave-08:/data/gcluster/data
3-[root@master-12 ~]# gluster volume create gv0 replica 3 master-12:/data/gcluster/data slave-09:/data/gcluster/data slave-08:/data/gcluster/data
4-volume create: gv0: success: please start the volume to access data
5-[root@master-12 ~]# gluster volume start gv0
6-volume start: gv0: success
1-[root@master-12 ~]# gluster volume info
2-
3-Volume Name: gv0
4-Type: Replicate
5-Volume ID: e5dcd35f-94af-4a6d-a8ad-feb1b5a4278d
6-Status: Started
7-Snapshot Count: 0
8-Number of Bricks: 1 x 3 = 3
9-Transport-type: tcp
10-Bricks:
11-Brick1: master-12:/data/gcluster/data
12-Brick2: slave-09:/data/gcluster/data
13-Brick3: slave-08:/data/gcluster/data
14-Options Reconfigured:
15-transport.address-family: inet
16-nfs.disable: on
17-performance.client-io-threads: off
测试
-
1 mount -t glusterfs master-12:/gv0 /mnt
-
1 for i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
由于 gv0 是 3 副本(replica 3)复制卷,数据会同步到所有 brick,因此在每台服务器的 /data/gcluster/data 目录下都能看到这 100 个文件,说明集群复制工作正常。