OS | Host | IP | Role | Storage | Configuration |
---|---|---|---|---|---|
CentOS 7.7 | node1 | 192.168.150.61 | Storage node | Two disks: 50 GB and 10 GB | Internet access; hosts records for all hosts; firewall and SELinux disabled |
CentOS 7.7 | node2 | 192.168.150.62 | Storage node | Two disks: 50 GB and 10 GB | Internet access; hosts records for all hosts; firewall and SELinux disabled |
CentOS 7.7 | node3 | 192.168.150.63 | Storage node | Two disks: 50 GB and 10 GB | Internet access; hosts records for all hosts; firewall and SELinux disabled |
RHEL 7.7 | computer | 192.168.150.11 | Storage client | One 50 GB disk | Internet access; hosts records for all hosts; firewall and SELinux disabled |
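The prerequisites in the table (hosts records for every machine, firewall and SELinux off) can be prepared roughly as follows on each host. This is a sketch using the IPs and hostnames from the table; adapt it to your environment.

# Run on every host: add hosts records for all four machines
cat >> /etc/hosts <<'EOF'
192.168.150.61 node1
192.168.150.62 node2
192.168.150.63 node3
192.168.150.11 computer
EOF
# Stop the firewall and disable SELinux (the config change takes full effect after a reboot)
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config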
# Download NetEase's CentOS 7 yum repo
[root@node1 yum.repos.d]# wget -O CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo
# Install the GlusterFS repo
[root@node1 yum.repos.d]# yum install centos-release-gluster -y
# Copy the repos to the other storage nodes
[root@node1 yum.repos.d]# scp * node2:/etc/yum.repos.d/
[root@node1 yum.repos.d]# scp * node3:/etc/yum.repos.d/
# Copy the GlusterFS repos to the computer node
[root@node1 yum.repos.d]# scp CentOS-Gluster-6.repo CentOS-Storage-common.repo computer:/etc/yum.repos.d/
# Disable GPG signature checking for the packages
[root@node1 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@node2 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@node3 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@computer /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
# Install the GlusterFS server on all three storage nodes
[root@node1 yum.repos.d]# yum install glusterfs-server -y
[root@node2 yum.repos.d]# yum install glusterfs-server -y
[root@node3 yum.repos.d]# yum install glusterfs-server -y
[root@node1 /]# systemctl start glusterd && systemctl enable glusterd
[root@node2 /]# systemctl start glusterd && systemctl enable glusterd
[root@node3 /]# systemctl start glusterd && systemctl enable glusterd
# Since computer runs RHEL, replace $releasever with 7 in the repo files
[root@computer yum.repos.d]# sed -i 's#\$releasever#7#g' /etc/yum.repos.d/*.repo
# Install the client
[root@computer yum.repos.d]# yum install -y glusterfs-fuse
# Packages installed on the client:
[root@computer yum.repos.d]# rpm -qa|grep glu
glusterfs-cli-6.6-1.el7.x86_64
glusterfs-6.6-1.el7.x86_64
glusterfs-fuse-6.6-1.el7.x86_64
libvirt-daemon-driver-storage-gluster-4.5.0-23.el7.x86_64
glusterfs-libs-6.6-1.el7.x86_64
glusterfs-api-6.6-1.el7.x86_64
glusterfs-client-xlators-6.6-1.el7.x86_64
# Packages installed on the server:
[root@node1 ~]# rpm -qa|grep glu
glusterfs-client-xlators-6.6-1.el7.x86_64
glusterfs-api-6.6-1.el7.x86_64
glusterfs-fuse-6.6-1.el7.x86_64
glusterfs-server-6.6-1.el7.x86_64
glusterfs-6.6-1.el7.x86_64
glusterfs-cli-6.6-1.el7.x86_64
glusterfs-libs-6.6-1.el7.x86_64
# If the probe fails, check the firewall and SELinux
[root@node1 /]# gluster peer probe node2
peer probe: success.
[root@node1 /]# gluster peer probe node3
peer probe: success.
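Optionally confirm that both peers joined the trusted pool before building volumes; these are standard gluster checks.

# Optional sanity check: both peers should show "Peer in Cluster (Connected)"
gluster peer status
gluster pool list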
# Create the physical volume and volume group
[root@node1 /]# pvcreate /dev/sdb && vgcreate vg_gluster /dev/sdb
Physical volume "/dev/sdb" successfully created
Volume group "vg_gluster" successfully created
# Create a thin pool (sized at 2500 extents)
[root@node1 /]# lvcreate -l 2500 -T vg_gluster/gluster_pool1
Logical volume "lvol0" created
Logical volume "gluster_pool1" created
# Carve a 3 GB thin logical volume out of the pool
[root@node1 /]# lvcreate -n dist_brick1 -V 3G -T vg_gluster/gluster_pool1
Logical volume "dist_brick1" created
# Format the dist_brick1 logical volume as XFS
[root@node1 /]# mkfs.xfs -i size=512 /dev/vg_gluster/dist_brick1
# Create the mount point
[root@node1 /]# mkdir -p /mygluster/dist_brick1
# Mount the volume
[root@node1 /]# mount /dev/vg_gluster/dist_brick1 /mygluster/dist_brick1/
# Create a brick directory inside the mount; this is what will be exported later
[root@node1 /]# mkdir /mygluster/dist_brick1/brick
[root@node2 ~]# pvcreate /dev/sdb ; vgcreate vg_gluster /dev/sdb
Physical volume "/dev/sdb" successfully created
Volume group "vg_gluster" successfully created
[root@node2 ~]# lvcreate -l 2500 -T vg_gluster/gluster_pool2
Logical volume "lvol0" created
Logical volume "gluster_pool2" created
[root@node2 ~]# lvcreate -n dist_brick2 -V 3G -T vg_gluster/gluster_pool2
Logical volume "dist_brick2" created
[root@node2 ~]# mkfs.xfs -i size=512 /dev/vg_gluster/dist_brick2
[root@node2 ~]# mkdir -p /mygluster/dist_brick2
[root@node2 ~]# mount /dev/vg_gluster/dist_brick2 /mygluster/dist_brick2
[root@node2 ~]# mkdir /mygluster/dist_brick2/brick
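Note that the mount commands above are not persistent; a minimal sketch to survive reboots is to append the matching line to /etc/fstab on each node (paths as created above).

# On node1:
echo '/dev/vg_gluster/dist_brick1 /mygluster/dist_brick1 xfs defaults 0 0' >> /etc/fstab
# On node2:
echo '/dev/vg_gluster/dist_brick2 /mygluster/dist_brick2 xfs defaults 0 0' >> /etc/fstab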
[root@node1 /]# gluster volume create distvol node1:/mygluster/dist_brick1/brick/
volume create: distvol: success: please start the volume to access data
[root@node1 /]# gluster volume start distvol
volume start: distvol: success
[root@node1 /]# gluster volume info
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 1
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
# From node2, add a second brick to the distvol volume
[root@node2 dist_brick2]# gluster volume add-brick distvol node2:/mygluster/dist_brick2/brick/
# Querying again shows the extra brick
[root@node2 dist_brick2]# gluster volume info
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
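Before mounting from the client, you can verify that both brick processes are online:

# Both bricks should be listed with Online "Y"
gluster volume status distvol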
[root@computer yum.repos.d]# mount.glusterfs node1:/distvol /mnt/
[root@computer yum.repos.d]# df -hT
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 894M 0 894M 0% /dev
tmpfs tmpfs 910M 0 910M 0% /dev/shm
tmpfs tmpfs 910M 11M 900M 2% /run
tmpfs tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/mapper/rhel-root xfs 47G 4.2G 43G 9% /
/dev/sda1 xfs 1014M 182M 833M 18% /boot
tmpfs tmpfs 182M 12K 182M 1% /run/user/42
tmpfs tmpfs 182M 0 182M 0% /run/user/0
node1:/distvol fuse.glusterfs 6.0G 127M 5.9G 3% /mnt
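A quick way to see the distribute volume in action is to create a few files through the FUSE mount and check how they land on the two bricks. This is just an illustrative test; the file names are arbitrary.

# On the client: create ten empty test files
touch /mnt/testfile{1..10}
# On node1 and node2: each brick should hold a subset of the files
ls /mygluster/dist_brick1/brick/    # run on node1
ls /mygluster/dist_brick2/brick/    # run on node2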
[root@node1 /]# yum install -y nfs-ganesha
[root@node1 /]# yum install -y nfs-utils
[root@node1 ganesha]# gluster volume info distvol
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
# At this point Gluster's built-in NFS is disabled
nfs.disable: on
[root@node1 ganesha]# gluster volume set distvol nfs.disable off
Gluster NFS is being deprecated in favor of NFS-Ganesha Enter "yes" to continue using Gluster NFS (y/n) y
[root@node1 ganesha]# gluster volume info distvol
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
nfs.disable: off
[root@node1 /]# cp /usr/share/doc/ganesha/config_samples/gluster.conf /etc/ganesha/
[root@node1 /]# vim /etc/ganesha/gluster.conf
EXPORT
{
    # Export Id (mandatory, each EXPORT must have a unique Export_Id)
    Export_Id = 77;
    # The export path: the gluster volume defined earlier
    Path = "/distvol";
    # Pseudo Path (required for NFS v4)
    Pseudo = "/distvol";
    # Required for access (default is None)
    # Could use CLIENT blocks instead
    Access_Type = RW;
    # Allow root access
    Squash = No_Root_Squash;
    # Security flavor supported
    SecType = "sys";
    # Exporting FSAL
    FSAL {
        # FSAL name ("gluster" for GlusterFS exports)
        Name = "gluster";
        # GlusterFS server address (0.0.0.0 here resolves to the local node)
        Hostname = 0.0.0.0;
        Volume = "distvol";
        Up_poll_usec = 10; # Upcall poll interval in microseconds
        Transport = tcp; # tcp or rdma
    }
}
[root@node1 /]# mv /etc/ganesha/gluster.conf /etc/ganesha/ganesha.conf
[root@node1 /]# systemctl restart nfs-ganesha
[root@node1 /]# systemctl restart nfs-ganesha-config.service
[root@node1 /]# systemctl restart rpcbind
[root@node1 ganesha]# showmount -e node1
Export list for node1:
/distvol (everyone)
[root@node1 ganesha]# mount.nfs node1:/distvol /mnt/
[root@node1 ganesha]# df -hT|grep mnt
node1:/distvol nfs4 6.0G 126M 5.9G 3% /mnt
# Check that the NFS client version matches the server's
[root@computer ~]# rpm -qa|grep nfs-utils
nfs-utils-1.3.0-0.65.el7.x86_64
[root@computer ~]# mount.nfs node1:/distvol /mnt/
[root@computer ~]# df -hT|grep mnt
node1:/distvol nfs4 6.0G 126M 5.9G 3% /mnt
[root@controller cinder]# vim /etc/cinder/cinder.conf
# Add a self-defined backend named nas alongside the existing lvm backend
enabled_backends=lvm,nas
# Append the nas backend section at the end of the file
[nas]
volume_group=cinder-volumes                          # volume group; do not leave this out!
volume_driver=cinder.volume.drivers.nfs.NfsDriver    # NFS driver
nfs_shares_config=/etc/cinder/nfs_shares.conf        # shares config file
volume_backend_name=nfs                              # backend storage name
nfs_sparsed_volumes=True
[root@controller cinder]# vim /etc/cinder/nfs_shares.conf
node1:/distvol
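cinder-volume reads this file as the cinder user, so restrictive but readable permissions are usually recommended; the ownership scheme below is an assumption based on common deployments.

# Assumed ownership/permissions so the cinder service can read the shares file
chown root:cinder /etc/cinder/nfs_shares.conf
chmod 640 /etc/cinder/nfs_shares.conf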
[root@controller ~(keystone_admin)]# cinder type-create nas
[root@controller ~(keystone_admin)]# cinder type-key nas set volume_backend_name=nfs
[root@controller ~(keystone_admin)]# cinder extra-specs-list
+--------------------------------------+-------+---------------------------------------------+
| ID | Name | extra_specs |
+--------------------------------------+-------+---------------------------------------------+
| 4fd547b2-3645-4a99-b494-454c58992708 | iscsi | {'volume_backend_name': 'lvm'} |
| 7c69bd74-b60e-488a-b7d2-4d3953b8cca7 | nas | {'set': None, 'volume_backend_name': 'nfs'} |
+--------------------------------------+-------+---------------------------------------------+
[root@controller cinder]# systemctl restart openstack-cinder-volume.service
[root@controller cinder]# systemctl restart openstack-cinder-api.service
[root@controller ~(keystone_admin)]# cinder service-list
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| cinder-backup | controller | nova | enabled | up | 2019-12-15T09:23:10.000000 | - |
| cinder-scheduler | controller | nova | enabled | up | 2019-12-15T09:23:10.000000 | - |
| cinder-volume | controller@lvm | nova | enabled | up | 2019-12-15T09:23:09.000000 | - |
| cinder-volume | controller@nas | nova | enabled | up | 2019-12-15T09:23:08.000000 | - |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
# If df reports "Unknown error 521" like below, the problem is on the gluster storage node; restart the service there and it clears up
[root@controller mnt]# df
df: "/var/lib/cinder/mnt/8295b8103a107e8dd0526c7e0c8f6859": 未知的错误 521
文件系统 1K-块 已用 可用 已用% 挂载点
devtmpfs 2945828 0 2945828 0% /dev
tmpfs 2962860 4 2962856 1% /dev/shm
tmpfs 2962860 123632 2839228 5% /run
tmpfs 2962860 0 2962860 0% /sys/fs/cgroup
/dev/mapper/rhel-root 52403200 6836376 45566824 14% /
/dev/mapper/rhel-home 45189572 33100 45156472 1% /home
/dev/sda1 1038336 230252 808084 23% /boot
tmpfs 592572 12 592560 1% /run/user/42
tmpfs 592572 0 592572 0% /run/user/0
/dev/loop1 1900368 610852 1168276 35% /srv/node/swiftloopback
192.168.150.61:/distvol 6270976 129024 6141952 3% /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90
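For the "Unknown error 521" case above, one recovery sequence consistent with the note is to restart the NFS service on the gluster node and then remount on the controller. A sketch (the stale path is the hashed mount directory from the error message):

# On node1: restart the NFS service backing the share
systemctl restart nfs-ganesha
# On the controller: lazily unmount the stale mount, then let cinder remount it
umount -l /var/lib/cinder/mnt/8295b8103a107e8dd0526c7e0c8f6859
systemctl restart openstack-cinder-volume.service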
[root@controller ~(keystone_admin)]# cinder create 1 --volume-type nas --name WEB02
+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2019-12-15T12:05:40.000000 |
| description | None |
| encrypted | False |
| id | 493f382b-9ce6-4fd3-9d1d-5a296e649627 |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | WEB02 |
| os-vol-host-attr:host | None |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | 0fe8e6b7060342ca9c1f06f84aa24628 |
| replication_status | None |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| updated_at | None |
| user_id | 22394636c4034eaf9c7060a82372a657 |
| volume_type | nas |
+--------------------------------+--------------------------------------+
[root@controller ~(keystone_admin)]# cinder list
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
| 493f382b-9ce6-4fd3-9d1d-5a296e649627 | available | WEB02 | 1 | nas | false | |
| d8cde449-488c-415c-92be-1a5334bee12e | available | WEB01 | 1 | iscsi | false | |
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
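Because WEB02 was created on the NFS backend, its backing file should be visible under cinder's mount directory (and on the gluster bricks). The path below combines the mount hash and volume ID shown earlier.

# The NFS-backed volume is just a file named volume-<ID> on the share
ls -lh /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90/volume-493f382b-9ce6-4fd3-9d1d-5a296e649627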
[root@controller /(keystone_admin)]# tailf /var/log/cinder/volume.log
# The create request is cast to the backend storage
2019-12-15 20:05:42.661 108461 INFO cinder.volume.drivers.remotefs [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] casted to 192.168.150.61:/distvol
2019-12-15 20:05:43.332 108461 WARNING cinder.volume.drivers.remotefs [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90/volume-493f382b-9ce6-4fd3-9d1d-5a296e649627 is being set with open permissions: ugo+rw
2019-12-15 20:05:44.062 108461 INFO cinder.volume.flows.manager.create_volume [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] Volume volume-493f382b-9ce6-4fd3-9d1d-5a296e649627 (493f382b-9ce6-4fd3-9d1d-5a296e649627): created successfully
# "created successfully" in the output above means the volume was created
# Reasons for creation failures can be found in this log:
[root@controller ~]# tailf /var/log/cinder/scheduler.log