
07 - Connecting Cinder to GlusterFS

Distributed Storage

Environment

| OS         | Host     | IP             | Role           | Storage                    | Configuration                                                                   |
|------------|----------|----------------|----------------|----------------------------|---------------------------------------------------------------------------------|
| CentOS 7.7 | node1    | 192.168.150.61 | Storage node   | Two disks: 50 GB and 10 GB | Internet access; hosts entries for all machines; firewall and SELinux disabled |
| CentOS 7.7 | node2    | 192.168.150.62 | Storage node   | Two disks: 50 GB and 10 GB | Internet access; hosts entries for all machines; firewall and SELinux disabled |
| CentOS 7.7 | node3    | 192.168.150.63 | Storage node   | Two disks: 50 GB and 10 GB | Internet access; hosts entries for all machines; firewall and SELinux disabled |
| RHEL 7.7   | computer | 192.168.150.11 | Storage client | One 50 GB disk             | Internet access; hosts entries for all machines; firewall and SELinux disabled |

Lab

1. Set up the GlusterFS repository

#Download the NetEase CentOS 7 yum repo
[root@node1 yum.repos.d]# wget -O CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo

#Install the GlusterFS repo
[root@node1 yum.repos.d]# yum install centos-release-gluster -y

#Copy the repo files to the other storage nodes
[root@node1 yum.repos.d]# scp * node2:/etc/yum.repos.d/
[root@node1 yum.repos.d]# scp * node3:/etc/yum.repos.d/

#Copy the GlusterFS repo files to the computer node
[root@node1 yum.repos.d]# scp CentOS-Gluster-6.repo CentOS-Storage-common.repo computer:/etc/yum.repos.d/

#Turn off GPG package signature checking
[root@node1 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@node2 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@node3 /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*
[root@computer /]# sed -i 's#gpgcheck=1#gpgcheck=0#g' /etc/yum.repos.d/*

2. Install the GlusterFS server on the storage nodes

#Install the server package
[root@node1 yum.repos.d]# yum install glusterfs-server -y
[root@node2 yum.repos.d]# yum install glusterfs-server -y
[root@node3 yum.repos.d]# yum install glusterfs-server -y

3. Start the service and enable it at boot

[root@node1 /]# systemctl start glusterd && systemctl enable glusterd
[root@node2 /]# systemctl start glusterd && systemctl enable glusterd
[root@node3 /]# systemctl start glusterd && systemctl enable glusterd

4. Install the GlusterFS client on the compute node

#The compute node runs RHEL, so replace $releasever with 7
[root@computer yum.repos.d]# sed -i 's#\$releasever#7#g' /etc/yum.repos.d/*.repo

#Install the client
[root@computer yum.repos.d]# yum install -y glusterfs-fuse 

5. Check the installed versions

#Client
[root@computer yum.repos.d]# rpm -qa|grep glu
glusterfs-cli-6.6-1.el7.x86_64
glusterfs-6.6-1.el7.x86_64
glusterfs-fuse-6.6-1.el7.x86_64
libvirt-daemon-driver-storage-gluster-4.5.0-23.el7.x86_64
glusterfs-libs-6.6-1.el7.x86_64
glusterfs-api-6.6-1.el7.x86_64
glusterfs-client-xlators-6.6-1.el7.x86_64

#Server
[root@node1 ~]# rpm -qa|grep glu
glusterfs-client-xlators-6.6-1.el7.x86_64
glusterfs-api-6.6-1.el7.x86_64
glusterfs-fuse-6.6-1.el7.x86_64
glusterfs-server-6.6-1.el7.x86_64
glusterfs-6.6-1.el7.x86_64
glusterfs-cli-6.6-1.el7.x86_64
glusterfs-libs-6.6-1.el7.x86_64

6. Add the other storage hosts to the trusted pool

#If probing fails, check the firewall and SELinux
[root@node1 /]# gluster peer probe node2
peer probe: success.
[root@node1 /]# gluster peer probe node3
peer probe: success.
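
  • To confirm that both probes really joined the trusted pool, gluster peer status on node1 should list node2 and node3 as connected (a quick check; the output below shows the expected shape rather than a capture from this run, with UUID lines omitted):
[root@node1 /]# gluster peer status
Number of Peers: 2

Hostname: node2
State: Peer in Cluster (Connected)

Hostname: node3
State: Peer in Cluster (Connected)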

7. Create the bricks

  • On node1, create a thin pool, then carve a thin logical volume out of it
#Create the physical volume and the volume group
[root@node1 /]# pvcreate /dev/sdb &&  vgcreate vg_gluster /dev/sdb
  Physical volume "/dev/sdb" successfully created
  Volume group "vg_gluster" successfully created

#Create the thin pool
[root@node1 /]# lvcreate -l 2500 -T vg_gluster/gluster_pool1
  Logical volume "lvol0" created
  Logical volume "gluster_pool1" created

#Create a thin logical volume
[root@node1 /]# lvcreate  -n dist_brick1 -V 3G -T vg_gluster/gluster_pool1
  Logical volume "dist_brick1" created
  
#Format the dist_brick1 volume with XFS (512-byte inodes leave room for Gluster's extended attributes)
[root@node1 /]# mkfs.xfs -i size=512 /dev/vg_gluster/dist_brick1 

#Create the mount point
[root@node1 /]# mkdir -p /mygluster/dist_brick1

#Mount the volume
[root@node1 /]# mount /dev/vg_gluster/dist_brick1 /mygluster/dist_brick1/

#Create a brick directory under the mount point; this is what will be exported later
[root@node1 /]# mkdir /mygluster/dist_brick1/brick
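
  • To double-check the thin pool and thin volume before moving on, lvs shows both; 2500 extents at the default 4 MiB extent size comes out to roughly a 9.8 GiB pool on the 10 GB disk (a quick sanity check, not part of the original walkthrough):
[root@node1 /]# lvs vg_gluster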
  • Repeat the same steps on node2
[root@node2 ~]# pvcreate /dev/sdb ; vgcreate vg_gluster /dev/sdb
  Physical volume "/dev/sdb" successfully created
  Volume group "vg_gluster" successfully created
  
[root@node2 ~]# lvcreate -l 2500 -T vg_gluster/gluster_pool2
  Logical volume "lvol0" created
  Logical volume "gluster_pool2" created
  
[root@node2 ~]# lvcreate -n dist_brick2  -V 3G -T vg_gluster/gluster_pool2
  Logical volume "dist_brick2" created
  
[root@node2 ~]# mkfs.xfs -i size=512 /dev/vg_gluster/dist_brick2 
[root@node2 ~]# mkdir -p /mygluster/dist_brick2
[root@node2 ~]# mount /dev/vg_gluster/dist_brick2 /mygluster/dist_brick2
[root@node2 ~]# mkdir /mygluster/dist_brick2/brick

8. Create the shared volume

[root@node1 /]# gluster volume create distvol node1:/mygluster/dist_brick1/brick/
volume create: distvol: success: please start the volume to access data

[root@node1 /]# gluster volume start distvol
volume start: distvol: success

[root@node1 /]# gluster volume info
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 1
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
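
  • Besides volume info, gluster volume status reports whether the brick process is actually online and which TCP port it serves (a useful check the original run skips):
[root@node1 /]# gluster volume status distvol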

9. Add a brick

#On node2, add a second brick to the distvol volume
[root@node2 dist_brick2]# gluster volume add-brick distvol node2:/mygluster/dist_brick2/brick/

#Check again: the volume now has a second brick
[root@node2 dist_brick2]# gluster volume info
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
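
  • On a distribute volume, files written before add-brick stay on their original brick; only new files hash across both. To spread existing data as well, run a rebalance (optional, standard gluster commands):
[root@node2 dist_brick2]# gluster volume rebalance distvol start
[root@node2 dist_brick2]# gluster volume rebalance distvol status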

10. Mount test on the client

  • Before mounting, make sure /etc/hosts has entries for the storage nodes
[root@computer yum.repos.d]# mount.glusterfs node1:/distvol /mnt/
[root@computer yum.repos.d]# df -hT
Filesystem            Type            Size  Used Avail Use% Mounted on
devtmpfs              devtmpfs        894M     0  894M    0% /dev
tmpfs                 tmpfs           910M     0  910M    0% /dev/shm
tmpfs                 tmpfs           910M   11M  900M    2% /run
tmpfs                 tmpfs           910M     0  910M    0% /sys/fs/cgroup
/dev/mapper/rhel-root xfs              47G  4.2G   43G    9% /
/dev/sda1             xfs            1014M  182M  833M   18% /boot
tmpfs                 tmpfs           182M   12K  182M    1% /run/user/42
tmpfs                 tmpfs           182M     0  182M    0% /run/user/0
node1:/distvol        fuse.glusterfs  6.0G  127M  5.9G    3% /mnt
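
  • The mount above will not survive a reboot. To make it permanent, an /etc/fstab entry along these lines works for GlusterFS FUSE mounts (a sketch, assuming the same volume and mount point):
node1:/distvol  /mnt  glusterfs  defaults,_netdev  0 0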

Cinder with External Storage

Via NFS

1. Install the NFS components

  • On the storage node, install the NFS components from the Gluster repository
[root@node1 /]# yum install -y nfs-ganesha
[root@node1 /]# yum install -y  nfs-utils

2. Enable NFS

  • Check the shared volume's info
[root@node1 ganesha]# gluster volume info distvol

Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
#NFS is disabled at this point
nfs.disable: on
  • Enable NFS
[root@node1 ganesha]# gluster volume set distvol nfs.disable off
Gluster NFS is being deprecated in favor of NFS-Ganesha Enter "yes" to continue using Gluster NFS (y/n) y
  • Check again
[root@node1 ganesha]# gluster volume info distvol 
 
Volume Name: distvol
Type: Distribute
Volume ID: ac914d3a-9619-4694-9a96-2c6d14adf364
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/mygluster/dist_brick1/brick
Brick2: node2:/mygluster/dist_brick2/brick
Options Reconfigured:
nfs.disable: off

3. Configure the NFS export

1) Copy the Gluster sample config

[root@node1 /]# cp /usr/share/doc/ganesha/config_samples/gluster.conf /etc/ganesha/

2) Edit the configuration file

[root@node1 /]# vim /etc/ganesha/gluster.conf
EXPORT
{
	# Export Id (mandatory, each EXPORT must have a unique Export_Id)
	Export_Id = 77;

	#The share path: the Gluster volume defined earlier
	Path = "/distvol";

	# Pseudo Path (required for NFS v4)
	Pseudo = "/distvol";

	# Required for access (default is None)
	# Could use CLIENT blocks instead
	Access_Type = RW;

	# Allow root access
	Squash = No_Root_Squash;

	# Security flavor supported
	SecType = "sys";

	# Exporting FSAL
	FSAL {
		#FSAL name
		Name = "gluster";
		#Address of the glusterd server
		Hostname = 0.0.0.0;
		Volume = "distvol";
                Up_poll_usec = 10; # Upcall poll interval in microseconds
                Transport = tcp; # tcp or rdma
	}
}

3) Make it the main configuration file

[root@node1 /]# mv /etc/ganesha/gluster.conf /etc/ganesha/ganesha.conf  

4) Restart the services

[root@node1 /]# systemctl restart nfs-ganesha
[root@node1 /]# systemctl restart nfs-ganesha-config.service
[root@node1 /]# systemctl restart rpcbind
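
  • If the export check in the next step comes back empty, first confirm that Ganesha is actually listening on the NFS port (a quick diagnostic, not part of the original run):
[root@node1 /]# ss -tlnp | grep 2049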

5) Check the export

[root@node1 ganesha]# showmount -e node1
Export list for node1:
/distvol (everyone)

4. Mount the NFS export

  • Mount test on the server
[root@node1 ganesha]# mount.nfs node1:/distvol /mnt/
[root@node1 ganesha]# df -hT|grep mnt
node1:/distvol                     nfs4      6.0G  126M  5.9G    3% /mnt
  • Mount test on the compute node
#Check that the NFS client version matches the server's
[root@computer ~]# rpm -qa|grep nfs-utils
nfs-utils-1.3.0-0.65.el7.x86_64

[root@computer ~]# mount.nfs node1:/distvol /mnt/
[root@computer ~]# df -hT|grep mnt
node1:/distvol        nfs4      6.0G  126M  5.9G    3% /mnt

5. Configure Cinder for the NFS backend

1) Edit the Cinder configuration file

[root@controller cinder]# vim /etc/cinder/cinder.conf
#Add a backend of your own, named nas, to the list
enabled_backends=lvm,nas

#Append the nas section at the end of the file
[nas]
volume_group=cinder-volumes							#volume group; do not leave this out!
volume_driver=cinder.volume.drivers.nfs.NfsDriver	#driver
nfs_shares_config=/etc/cinder/nfs_shares.conf		#shares list file
volume_backend_name=nfs								#backend storage name
nfs_sparsed_volumes=True

2) Create the NFS shares file

[root@controller cinder]# vim /etc/cinder/nfs_shares.conf 
node1:/distvol
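
  • The cinder-volume service must be able to read this file. Ownership and permissions along these lines are typical for files under /etc/cinder (an assumption based on common deployments; adjust to your packaging):
[root@controller cinder]# chown root:cinder /etc/cinder/nfs_shares.conf
[root@controller cinder]# chmod 640 /etc/cinder/nfs_shares.conf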

3) Create and associate a new volume type

  • Create and associate
[root@controller ~(keystone_admin)]# cinder type-create nas
[root@controller ~(keystone_admin)]# cinder type-key nas set volume_backend_name=nfs
  • Check the result
[root@controller ~(keystone_admin)]# cinder extra-specs-list 
+--------------------------------------+-------+---------------------------------------------+
| ID                                   | Name  | extra_specs                                 |
+--------------------------------------+-------+---------------------------------------------+
| 4fd547b2-3645-4a99-b494-454c58992708 | iscsi | {'volume_backend_name': 'lvm'}              |
| 7c69bd74-b60e-488a-b7d2-4d3953b8cca7 | nas   | {'set': None, 'volume_backend_name': 'nfs'} |
+--------------------------------------+-------+---------------------------------------------+

4) Restart the services

[root@controller cinder]# systemctl restart openstack-cinder-volume.service 
[root@controller cinder]# systemctl restart openstack-cinder-api.service 

5) Check the storage services

[root@controller ~(keystone_admin)]# cinder service-list 
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| Binary           | Host            | Zone | Status  | State | Updated_at                 | Disabled Reason |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| cinder-backup    | controller      | nova | enabled | up    | 2019-12-15T09:23:10.000000 | -               |
| cinder-scheduler | controller      | nova | enabled | up    | 2019-12-15T09:23:10.000000 | -               |
| cinder-volume    | controller@lvm  | nova | enabled | up    | 2019-12-15T09:23:09.000000 | -               |
| cinder-volume    | controller@nas  | nova | enabled | up    | 2019-12-15T09:23:08.000000 | -               |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+

6) Check the mounts on the Cinder node

#If you hit "Unknown error 521" like below, the problem is on the Gluster storage node; restart the service there and it clears up
[root@controller mnt]# df
df: "/var/lib/cinder/mnt/8295b8103a107e8dd0526c7e0c8f6859": Unknown error 521
Filesystem               1K-blocks    Used Available Use% Mounted on
devtmpfs                 2945828       0  2945828    0% /dev
tmpfs                    2962860       4  2962856    1% /dev/shm
tmpfs                    2962860  123632  2839228    5% /run
tmpfs                    2962860       0  2962860    0% /sys/fs/cgroup
/dev/mapper/rhel-root   52403200 6836376 45566824   14% /
/dev/mapper/rhel-home   45189572   33100 45156472    1% /home
/dev/sda1                1038336  230252   808084   23% /boot
tmpfs                     592572      12   592560    1% /run/user/42
tmpfs                     592572       0   592572    0% /run/user/0
/dev/loop1               1900368  610852  1168276   35% /srv/node/swiftloopback
192.168.150.61:/distvol  6270976  129024  6141952    3% /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90

6. Create a volume

  • Create one of type nas, size 1 GB, named WEB02
[root@controller ~(keystone_admin)]# cinder create 1 --volume-type nas --name WEB02
+--------------------------------+--------------------------------------+
| Property                       | Value                                |
+--------------------------------+--------------------------------------+
| attachments                    | []                                   |
| availability_zone              | nova                                 |
| bootable                       | false                                |
| consistencygroup_id            | None                                 |
| created_at                     | 2019-12-15T12:05:40.000000           |
| description                    | None                                 |
| encrypted                      | False                                |
| id                             | 493f382b-9ce6-4fd3-9d1d-5a296e649627 |
| metadata                       | {}                                   |
| migration_status               | None                                 |
| multiattach                    | False                                |
| name                           | WEB02                                |
| os-vol-host-attr:host          | None                                 |
| os-vol-mig-status-attr:migstat | None                                 |
| os-vol-mig-status-attr:name_id | None                                 |
| os-vol-tenant-attr:tenant_id   | 0fe8e6b7060342ca9c1f06f84aa24628     |
| replication_status             | None                                 |
| size                           | 1                                    |
| snapshot_id                    | None                                 |
| source_volid                   | None                                 |
| status                         | creating                             |
| updated_at                     | None                                 |
| user_id                        | 22394636c4034eaf9c7060a82372a657     |
| volume_type                    | nas                                  |
+--------------------------------+--------------------------------------+
  • Check the creation status
[root@controller ~(keystone_admin)]# cinder list
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
| ID                                   | Status    | Name  | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
| 493f382b-9ce6-4fd3-9d1d-5a296e649627 | available | WEB02 | 1    | nas         | false    |             |
| d8cde449-488c-415c-92be-1a5334bee12e | available | WEB01 | 1    | iscsi       | false    |             |
+--------------------------------------+-----------+-------+------+-------------+----------+-------------+
  • Check the volume log
[root@controller /(keystone_admin)]# tailf /var/log/cinder/volume.log
#The volume is created on the backend storage
2019-12-15 20:05:42.661 108461 INFO cinder.volume.drivers.remotefs [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] casted to 192.168.150.61:/distvol
2019-12-15 20:05:43.332 108461 WARNING cinder.volume.drivers.remotefs [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90/volume-493f382b-9ce6-4fd3-9d1d-5a296e649627 is being set with open permissions: ugo+rw

2019-12-15 20:05:44.062 108461 INFO cinder.volume.flows.manager.create_volume [req-49ca016d-dcea-4586-ad3d-f1a84a9302e3 22394636c4034eaf9c7060a82372a657 0fe8e6b7060342ca9c1f06f84aa24628 - default default] Volume volume-493f382b-9ce6-4fd3-9d1d-5a296e649627 (493f382b-9ce6-4fd3-9d1d-5a296e649627): created successfully
#As long as "created successfully" appears above, the volume was created
  • Check the scheduler log
#The causes of creation-time errors all end up in this log
[root@controller ~]# tailf /var/log/cinder/scheduler.log
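
  • Behind the scenes, the NFS driver stores each volume as a file named volume-<id> on the mounted share; with nfs_sparsed_volumes=True it is a sparse file. To look at WEB02's backing file from the controller (mount path taken from the df output above, file name derived from the volume ID):
[root@controller ~]# ls -lh /var/lib/cinder/mnt/d026d44a6c05b310f43a314539126d90/volume-493f382b-9ce6-4fd3-9d1d-5a296e649627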
