Create a storage pool
ceph osd pool create ceph-demo 64 64
# List storage pools
ceph osd lspools
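To see the full definition of every pool (replica size, pg_num, flags, and so on) in one shot, the Ceph CLI also offers:
ceph osd pool ls detail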
View storage pool parameters
[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
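Rather than querying parameters one at a time, you can also dump them all at once:
ceph osd pool get ceph-demo all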
Modify storage pool parameters
ceph osd pool set ceph-demo pg_num 32
ceph osd pool set ceph-demo pgp_num 32
# pgp_num should always be kept equal to pg_num
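Re-run the get commands to confirm the change took effect. Note that shrinking pg_num (as here, 64 down to 32) is only supported on Nautilus and later; older releases can only increase it.
ceph osd pool get ceph-demo pg_num
ceph osd pool get ceph-demo pgp_num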
Create an image
rbd create -p ceph-demo --image rbd-demo.img --size 10G
or: rbd create ceph-demo/rbd-demo-1.img --size 10G
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
rbd-demo-1.img
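Adding -l produces a long listing that includes each image's size and format:
rbd -p ceph-demo ls -l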
View image details
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1134749f26f5
block_name_prefix: rbd_data.1134749f26f5
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Sep 22 15:39:03 2020
access_timestamp: Tue Sep 22 15:39:03 2020
modify_timestamp: Tue Sep 22 15:39:03 2020
Delete an image
rbd rm ceph-demo/rbd-demo-1.img
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
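Deletion with rbd rm is immediate and unrecoverable. Since Luminous, rbd also has a trash mechanism; a sketch (the <image-id> placeholder comes from the trash listing):
rbd trash mv ceph-demo/rbd-demo-1.img   # move to the trash instead of deleting
rbd trash ls -p ceph-demo               # list trashed images with their ids
rbd trash restore -p ceph-demo <image-id>   # restore if deleted by mistake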
Map the block device
rbd map ceph-demo/rbd-demo.img
rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
The errors above appear because the CentOS 7 kernel does not support some of these image features, so they have to be disabled:
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img deep-flatten
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img fast-diff
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img object-map
rbd: failed to update image features: (22) Invalid argument
2020-09-25 14:03:56.251 7efe8d341c80 -1 librbd::Operations: one or more requested features are already disabled
# this error just means object-map was already disabled at this point; it is safe to ignore
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
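To avoid this dance entirely, the feature set can be restricted when the image is created (rbd-demo-2.img is just an illustrative name):
rbd create ceph-demo/rbd-demo-2.img --size 10G --image-feature layering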
Map it again
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
# List mapped devices
[root@node-1 ~]# rbd device list
id pool namespace image snap device
0 ceph-demo rbd-demo.img - /dev/rbd0
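For reference, the reverse operation is rbd unmap (don't run it yet; the device is used below):
rbd unmap /dev/rbd0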
Format the device
mkfs.ext4 /dev/rbd0
View block devices
[root@node-1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 99G 0 part
└─centos-root 253:0 0 99G 0 lvm /
sdb 8:16 0 50G 0 disk
└─ceph--82366968--ae58--446f--88db--57f5246aa08d-osd--block--19d85d44--9015--4580--bf2d--eae1afe0c0d6
253:1 0 50G 0 lvm
sdc 8:32 0 50G 0 disk
sr0 11:0 1 792M 0 rom
rbd0 252:0 0 10G 0 disk
Mount the block device
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
[root@node-1 ~]# df -h /dev/rbd0
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 9.8G 37M 9.2G 1% /mnt/rbd-demo
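Note that neither the mapping nor the mount survives a reboot. Ceph ships an rbdmap service for persistence; a minimal sketch, assuming the admin keyring (the client name and paths are assumptions, adjust for your cluster):
# /etc/ceph/rbdmap — one image per line
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
# /etc/fstab — udev creates /dev/rbd/<pool>/<image> symlinks for mapped images
/dev/rbd/ceph-demo/rbd-demo.img /mnt/rbd-demo ext4 noauto,_netdev 0 0
# enable the service
systemctl enable rbdmap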
Expand the RBD image
[root@node-1 rbd-demo]# rbd resize ceph-demo/rbd-demo.img --size 20G
Resizing image: 100% complete...done.
[root@node-1 rbd-demo]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
size 20 GiB in 5120 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 1134749f26f5
block_name_prefix: rbd_data.1134749f26f5
format: 2
features: layering
op_features:
flags:
create_timestamp: Tue Sep 22 15:39:03 2020
access_timestamp: Tue Sep 22 15:39:03 2020
modify_timestamp: Tue Sep 22 15:39:03 2020
# the image has been expanded to 20G
Grow the filesystem on the host
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 9.8G 37M 9.2G 1% /mnt/rbd-demo
# the mounted filesystem still shows the old size, so grow it to pick up the new space
[root@node-1 rbd-demo]# resize2fs /dev/rbd0
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd0 is mounted on /mnt/rbd-demo; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 3
The filesystem on /dev/rbd0 is now 5242880 blocks long.
# check the size again
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 20G 44M 19G 1% /mnt/rbd-demo
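resize2fs only handles ext2/3/4. Had the device been formatted with XFS, the equivalent online grow would be (note it takes the mount point, not the device):
xfs_growfs /mnt/rbd-demo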
View the data objects
# grep for the block_name_prefix value reported by rbd info; it is the prefix of every object name belonging to this image
rados -p ceph-demo ls | grep rbd_data.1134749f26f5
Inspect a single object
[root@node-1 rbd-demo]# rados -p ceph-demo stat rbd_data.1134749f26f5.0000000000000e00
ceph-demo/rbd_data.1134749f26f5.0000000000000e00 mtime 2020-09-25 14:19:39.000000, size 4096
# objects are at most 4 MiB by default (order 22); this one is only 4096 bytes because RBD allocates object space on demand
View where an object's data is stored
[root@node-1 rbd-demo]# ceph osd map ceph-demo rbd_data.1134749f26f5.0000000000000120
osdmap e35 pool 'ceph-demo' (1) object 'rbd_data.1134749f26f5.0000000000000120' -> pg 1.b2f3e87f (1.3f) -> up ([1,2,0], p1) acting ([1,2,0], p1)
The object above lands in the PG with id 1.3f, and that PG is stored on the three OSDs [1,2,0]; ceph osd tree shows which host each OSD lives on, as in the example below.
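ceph osd tree   # prints the CRUSH hierarchy, mapping each OSD to its host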
Test writing data
dd if=/dev/zero of=test.image bs=1M count=1024   # write 1 GiB of test data inside the mount point /mnt/rbd-demo
rados -p ceph-demo ls | grep rbd_data.1134749f26f5 | wc -l
299
# the object count has grown as data was written
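To watch the count climb while dd is running, something like this works:
watch -n 1 'rados -p ceph-demo ls | grep rbd_data.1134749f26f5 | wc -l'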