
Ceph Object Storage Cluster Deployment

院长技术 · Published 2020-06-11 22:18:53

Cluster architecture

192.168.10.186   ceph1          admin, mon, mgr, osd, rgw

192.168.10.187   ceph2          mon, mgr, osd, rgw

192.168.10.188   ceph3          mon, mgr, osd, rgw

Deployment

[root@10dot186 ~]# vim /etc/hosts
192.168.10.186   ceph1
192.168.10.187   ceph2
192.168.10.188   ceph3

Set the hostname on each node accordingly:
hostnamectl set-hostname ceph1    # on 192.168.10.186
hostnamectl set-hostname ceph2    # on 192.168.10.187
hostnamectl set-hostname ceph3    # on 192.168.10.188

ntpdate ntp1.aliyun.com
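ntpdate only does a one-shot sync, and Ceph monitors are sensitive to clock skew. To keep the clocks aligned, a periodic sync can be scheduled on every node, for example via cron (a minimal sketch; any NTP daemon such as chrony works just as well):

# append a half-hourly sync against the Aliyun NTP server to root's crontab
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1') | crontab -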



ssh-keygen
ssh-copy-id ceph1
ssh-copy-id ceph2
ssh-copy-id ceph3
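Passwordless SSH from ceph1 to all three nodes is what ceph-deploy relies on; a quick check (not part of the original steps) is a hostname round-trip:

# each command should print the remote hostname without prompting for a password
for h in ceph1 ceph2 ceph3; do ssh $h hostname; done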


[root@ceph1 ~]# vim /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1




yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum makecache
yum update -y
yum install -y ceph-deploy




mkdir /etc/ceph && cd /etc/ceph
ceph-deploy new ceph1  ceph2 ceph3
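ceph-deploy new writes the initial cluster files into the working directory; the configuration edits in the next step go into the generated ceph.conf:

ls /etc/ceph
# expect ceph.conf, ceph.mon.keyring and ceph-deploy-ceph.log created by ceph-deploy new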


yum install -y python-setuptools
Add the following to the generated configuration file (/etc/ceph/ceph.conf):
osd_pool_default_size = 3
[mgr]
mgr modules = dashboard
[mon]
mon allow pool delete = true
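For reference, after these additions /etc/ceph/ceph.conf on ceph1 looks roughly like the sketch below. The [global] section is what ceph-deploy new generated for this cluster; the fsid and mon_host values will differ in other environments:

[global]
fsid = fcb2fa5e-481a-4494-9a27-374048f37113
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.10.186,192.168.10.187,192.168.10.188
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 3

[mgr]
mgr modules = dashboard

[mon]
mon allow pool delete = true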

mon

ceph-deploy install ceph1 ceph2 ceph3

ceph-deploy mon create-initial
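Besides bootstrapping the monitors, create-initial gathers the keyrings into the working directory; these are what later let ceph-deploy push admin credentials and create OSD/RGW daemons:

ls /etc/ceph/*.keyring
# expect ceph.client.admin.keyring plus the ceph.bootstrap-{mgr,osd,mds,rgw}.keyring files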



[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:

mgr

ceph-deploy mgr create ceph1 ceph2 ceph3


[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph2, ceph3
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs: 
    
    
    
[root@ceph1 ceph]# ceph mgr dump
{
    "epoch": 4,
    "active_gid": 4122,
    "active_name": "ceph1",
    "active_addr": "192.168.10.186:6800/22316",
    "available": true,
    "standbys": [
        {
            "gid": 4129,
            "name": "ceph2",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        },
        {
            "gid": 4132,
            "name": "ceph3",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        }
    ],
    "modules": [
        "balancer",
        "restful",
        "status"
    ],
    "available_modules": [
        "balancer",
        "dashboard",
        "influx",
        "localpool",
        "prometheus",
        "restful",
        "selftest",
        "status",
        "zabbix"
    ],
    "services": {}
}
[root@ceph1 ceph]# ceph mgr module enable dashboard
[root@ceph1 ceph]# ceph mgr dump
{
    "epoch": 7,
    "active_gid": 4139,
    "active_name": "ceph1",
    "active_addr": "192.168.10.186:6800/22316",
    "available": true,
    "standbys": [
        {
            "gid": 4136,
            "name": "ceph3",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        },
        {
            "gid": 4141,
            "name": "ceph2",
            "available_modules": [
                "balancer",
                "dashboard",
                "influx",
                "localpool",
                "prometheus",
                "restful",
                "selftest",
                "status",
                "zabbix"
            ]
        }
    ],
    "modules": [
        "balancer",
        "dashboard",
        "restful",
        "status"
    ],
    "available_modules": [
        "balancer",
        "dashboard",
        "influx",
        "localpool",
        "prometheus",
        "restful",
        "selftest",
        "status",
        "zabbix"
    ],
    "services": {}
}


[root@ceph1 ceph]# ceph config-key put mgr/dashboard/server_addr 192.168.6.101
set mgr/dashboard/server_addr
[root@ceph1 ceph]# ceph config-key put mgr/dashboard/server_port 7000
set mgr/dashboard/server_port
[root@ceph1 ~]# netstat -tulnp |grep 7000
tcp        0      0 192.168.6.101:7000      0.0.0.0:*               LISTEN      19836/ceph-mgr
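server_addr has to be an address on the node running the active mgr (the 192.168.6.101 here comes from the author's environment). If the port does not show up after setting the keys, reloading the dashboard module makes the mgr re-read them (a minimal sketch):

ceph mgr module disable dashboard
ceph mgr module enable dashboard
# or restart the active mgr daemon directly, here on ceph1:
systemctl restart ceph-mgr@ceph1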

Now check the dashboard:

dash_cluster1.png

osd

Create an LVM logical volume on each node (shown for ceph1):
[root@ceph1 ceph]# pvcreate /dev/sdb
  Physical volume "/dev/sdb" successfully created.

[root@ceph1 ceph]# vgcreate data_vg1 /dev/sdb
  Volume group "data_vg1" successfully created
  
[root@ceph1 ceph]# lvcreate -n data_lv1 -L 99g data_vg1   
  Logical volume "data_lv1" created.
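The same physical volume, volume group and logical volume have to exist on ceph2 and ceph3 before their OSDs can be created; from ceph1 this can be scripted over SSH (a sketch, assuming /dev/sdb is the spare data disk on every node):

for h in ceph2 ceph3; do
    ssh $h "pvcreate /dev/sdb && vgcreate data_vg1 /dev/sdb && lvcreate -n data_lv1 -L 99g data_vg1"
done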
  



ceph-deploy osd create ceph1 --data data_vg1/data_lv1
ceph-deploy osd create ceph2 --data data_vg1/data_lv1
ceph-deploy osd create ceph3 --data data_vg1/data_lv1
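Before checking the overall cluster state, the individual OSDs can be inspected with the standard commands:

ceph osd tree    # all three OSDs should show up as "up" under their hosts
ceph osd df      # per-OSD capacity and usage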

[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   3.01GiB used, 294GiB / 297GiB avail
    pgs:

Now check the dashboard:

dash_cluster2.png

RGW cluster

ceph-deploy install --rgw ceph1 ceph2 ceph3

ceph-deploy admin ceph1 ceph2 ceph3

ceph-deploy rgw create ceph1 ceph2 ceph3
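Each RGW instance listens on port 7480 by default (civetweb). A quick anonymous request to any gateway node should return a small ListAllMyBucketsResult XML body once the daemons are up:

curl http://ceph1:7480
# an empty ListAllMyBucketsResult document means the gateway is answering S3 requests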

[root@ceph1 ceph]# ceph -s
  cluster:
    id:     fcb2fa5e-481a-4494-9a27-374048f37113
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 3 osds: 3 up, 3 in
    rgw: 3 daemons active
 
  data:
    pools:   4 pools, 32 pgs
    objects: 191 objects, 3.08KiB
    usage:   3.01GiB used, 294GiB / 297GiB avail
    pgs:     32 active+clean

Now check the dashboard:

dash_cluster3.png

NGINX proxy

NGINX installation itself is not covered here.

[root@ceph1 conf.d]# cat cephcloud.dev.goago.cn.conf 
upstream cephcloud.dev.goago.cn {
    server 192.168.10.186:7480;
    server 192.168.10.187:7480;
    server 192.168.10.188:7480;
}

server {
    listen      80;
    server_name cephcloud.dev.goago.cn;

    location / {
        proxy_intercept_errors on;
        access_log /var/log/nginx/cephcloud_log;
        proxy_pass http://cephcloud.dev.goago.cn;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Request_Uri $request_uri;
    }
}
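With the proxy in place the gateways are reachable through NGINX on port 80. Assuming NGINX runs on ceph1 (192.168.10.186), a quick check from any host, forcing the virtual host name, would be:

curl -H "Host: cephcloud.dev.goago.cn" http://192.168.10.186/
# should return the same ListAllMyBucketsResult XML as hitting an RGW node on 7480 directly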

S3 and Swift

The detailed S3/Swift client setup is not covered here; see my previous article. For reference, the s3cmd settings used against this cluster:

New settings:
  Access Key: M954JYYAOBES65B7UNEZ
  Secret Key: 11MZu3N9vB4S4C4N8U2Ywgkhxro3Xi6K9HPyRQ9v
  Default Region: US
  S3 Endpoint: cephcloud.dev.goago.cn
  DNS-style bucket+hostname:port template for accessing a bucket: %(bucket)s.cephcloud.dev.goago.cn bucket
  Encryption password: 123456
  Path to GPG program: /usr/bin/gpg
  Use HTTPS protocol: False
  HTTP Proxy server name: 
  HTTP Proxy server port: 0
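With this configuration written to ~/.s3cfg, basic S3 operations against the cluster work through s3cmd (the bucket name below is just an example):

s3cmd mb s3://test-bucket                     # create a bucket
s3cmd put /etc/hosts s3://test-bucket/hosts   # upload an object
s3cmd ls s3://test-bucket                     # list the bucket contents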