Create a storage pool
ceph osd pool create ceph-demo 64 64
# List storage pools
ceph osd lspools
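On Luminous and later releases, a newly created pool should also be tagged with the application that will use it, otherwise ceph health warns about an application-less pool. A minimal sketch, using the rbd CLI from ceph-common:

# Initialize the pool for RBD use (this also tags the pool with the rbd application)
rbd pool init ceph-demo

# Equivalent explicit tagging
ceph osd pool application enable ceph-demo rbd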
Check storage pool parameters
[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
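Every parameter of the pool can also be dumped in one call with the all keyword:

ceph osd pool get ceph-demo all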
Modify storage pool parameters
ceph osd pool set ceph-demo pg_num 32
ceph osd pool set ceph-demo pgp_num 32
# Note: releases before Nautilus only allow increasing pg_num, not decreasing it
Create an image
rbd create -p ceph-demo --image rbd-demo.img --size 10G
# or equivalently
rbd create ceph-demo/rbd-demo-1.img --size 10G
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
rbd-demo-1.img
View image details
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1134749f26f5
        block_name_prefix: rbd_data.1134749f26f5
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Tue Sep 22 15:39:03 2020
        access_timestamp: Tue Sep 22 15:39:03 2020
        modify_timestamp: Tue Sep 22 15:39:03 2020
Delete an image
rbd rm ceph-demo/rbd-demo-1.img
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img
Map the block device
rbd map ceph-demo/rbd-demo.img
rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
The error above occurs because the CentOS 7 kernel does not support some of these image features, so they have to be disabled.
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img deep-flatten
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img fast-diff
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img object-map
rbd: failed to update image features: (22) Invalid argument
2020-09-25 14:03:56.251 7efe8d341c80 -1 librbd::Operations: one or more requested features are already disabled
[root@node-1 ~]# rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
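The features can also be disabled in a single call, or the problem avoided up front by creating images with only kernel-supported features. A sketch, assuming a default feature mask of 1 means layering only; rbd-demo-2.img is a hypothetical image name:

# Disable several features in one command
rbd feature disable ceph-demo/rbd-demo.img exclusive-lock object-map fast-diff deep-flatten

# Create a new image with only the layering feature (rbd-demo-2.img is hypothetical)
rbd create ceph-demo/rbd-demo-2.img --size 10G --image-feature layering

# Or set the default for all new images in /etc/ceph/ceph.conf:
#   [client]
#   rbd default features = 1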
Map the device again
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
# List mapped devices
[root@node-1 ~]# rbd device list
id pool      namespace image        snap device
0  ceph-demo           rbd-demo.img -    /dev/rbd0
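The older rbd showmapped prints the same mapping table:

rbd showmapped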
Format the device
mkfs.ext4 /dev/rbd0
Check the devices
[root@node-1 ~]# lsblk
NAME                                              MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                                                 8:0    0  100G  0 disk
├─sda1                                              8:1    0    1G  0 part /boot
└─sda2                                              8:2    0   99G  0 part
  └─centos-root                                   253:0    0   99G  0 lvm  /
sdb                                                 8:16   0   50G  0 disk
└─ceph--82366968--ae58--446f--88db--57f5246aa08d-osd--block--19d85d44--9015--4580--bf2d--eae1afe0c0d6 253:1 0 50G 0 lvm
sdc                                                 8:32   0   50G  0 disk
sr0                                                11:0    1  792M  0 rom
rbd0                                              252:0    0   10G  0 disk
Mount the block device
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
[root@node-1 ~]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       9.8G   37M  9.2G   1% /mnt/rbd-demo
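To survive reboots, the mapping and mount can be made persistent through the rbdmap service shipped with ceph-common. A sketch, assuming the admin keyring at its default path:

# /etc/ceph/rbdmap — one image per line
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

# /etc/fstab — noauto/_netdev so boot does not block before the network is up
/dev/rbd/ceph-demo/rbd-demo.img  /mnt/rbd-demo  ext4  noauto,_netdev  0 0

# Map the images listed in /etc/ceph/rbdmap at boot
systemctl enable rbdmap.service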
Expand the RBD image
[root@node-1 rbd-demo]# rbd resize ceph-demo/rbd-demo.img --size 20G
Resizing image: 100% complete...done.
[root@node-1 rbd-demo]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
        size 20 GiB in 5120 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1134749f26f5
        block_name_prefix: rbd_data.1134749f26f5
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Tue Sep 22 15:39:03 2020
        access_timestamp: Tue Sep 22 15:39:03 2020
        modify_timestamp: Tue Sep 22 15:39:03 2020
# The image has been expanded to 20G
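The same command can shrink an image, but since that discards data beyond the new size, rbd refuses unless --allow-shrink is passed explicitly:

rbd resize ceph-demo/rbd-demo.img --size 10G --allow-shrink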
Grow the filesystem on the host
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       9.8G   37M  9.2G   1% /mnt/rbd-demo
# The mounted filesystem has not grown yet; resize it online
[root@node-1 rbd-demo]# resize2fs /dev/rbd0
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd0 is mounted on /mnt/rbd-demo; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 3
The filesystem on /dev/rbd0 is now 5242880 blocks long.
# Check the size again
[root@node-1 rbd-demo]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0        20G   44M   19G   1% /mnt/rbd-demo
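resize2fs only handles ext2/3/4. Had the device been formatted with XFS, the online-grow equivalent takes the mount point rather than the device:

xfs_growfs /mnt/rbd-demo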
View the data objects
# The grep pattern is the block_name_prefix reported by rbd info; every object
# belonging to the image carries it as a name prefix
rados -p ceph-demo ls | grep rbd_data.1134749f26f5
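To size every object belonging to the image in one pass, a small loop over the listing works; a sketch (object names contain no whitespace, so the unquoted expansion is safe here):

for obj in $(rados -p ceph-demo ls | grep rbd_data.1134749f26f5); do
    rados -p ceph-demo stat "$obj"
done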
Stat an object
[root@node-1 rbd-demo]# rados -p ceph-demo stat rbd_data.1134749f26f5.0000000000000e00
ceph-demo/rbd_data.1134749f26f5.0000000000000e00 mtime 2020-09-25 14:19:39.000000, size 4096
# Objects are thin-provisioned: the default maximum object size is 4 MiB (order 22),
# but this particular object currently holds only 4 KiB of data
Locate where an object is stored
[root@node-1 rbd-demo]# ceph osd map ceph-demo rbd_data.1134749f26f5.0000000000000120
osdmap e35 pool 'ceph-demo' (1) object 'rbd_data.1134749f26f5.0000000000000120' -> pg 1.b2f3e87f (1.3f) -> up ([1,2,0], p1) acting ([1,2,0], p1)
The object above lands in the PG with id 1.3f, and that PG is stored on the three OSDs with ids [1,2,0]. Run ceph osd tree to see which host each of those OSDs lives on.
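Two quick follow-up queries (the pg id 1.3f is taken from the output above):

# Which OSDs currently serve this PG
ceph pg map 1.3f

# The CRUSH tree, showing the host each OSD belongs to
ceph osd tree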
Test writing data
dd if=/dev/zero of=test.image bs=1M count=1024
rados -p ceph-demo ls | grep rbd_data.1134749f26f5 | wc -l
299
# The object count grows as data is written
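To watch the count climb while dd is running, something like this in a second terminal works; a sketch:

watch -n 1 "rados -p ceph-demo ls | grep -c rbd_data.1134749f26f5"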