These are just notes on commonly used commands, kept here for my own quick reference.
CRUSH map
Getting the CRUSH map
To get the cluster's CRUSH map, run:
ceph osd getcrushmap -o {compiled-crushmap-filename}
Ceph writes (-o) the CRUSH map to the file you specify. The map comes out in compiled (binary) form, so it has to be decompiled before it can be read or edited.
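For example (crushmap.bin is just a placeholder filename of my own choosing):
ceph osd getcrushmap -o crushmap.bin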
Decompiling the CRUSH map
To decompile the CRUSH map, run:
crushtool -d {compiled-crushmap-filename} -o {decompiled-crushmap-filename}
crushtool decompiles (-d) the binary CRUSH map and writes (-o) the plain-text result to the file you specify.
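Continuing with the placeholder filenames from above:
crushtool -d crushmap.bin -o crushmap.txt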
Compiling the CRUSH map
To compile an edited CRUSH map, run:
crushtool -c {decompiled-crushmap-filename} -o {compiled-crushmap-filename}
crushtool compiles (-c) the plain-text CRUSH map and saves the compiled binary to the file you specify.
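For example, after editing the decompiled crushmap.txt (writing the result to a new file so the original binary is kept around):
crushtool -c crushmap.txt -o crushmap-new.bin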
Injecting the CRUSH map
To apply a CRUSH map to the cluster, run:
ceph osd setcrushmap -i {compiled-crushmap-filename}
Ceph reads (-i) the compiled CRUSH map you specify and injects it into the cluster.
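For example, to inject the map recompiled in the previous step:
ceph osd setcrushmap -i crushmap-new.bin
Together, the four commands above make up the usual get, decompile, edit, compile, inject cycle.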
Viewing object distribution
Viewing the set of objects that make up a file in the filesystem (the cephfs tool is deprecated; as its warnings note, the layout.* xattrs covered later are the recommended way to query and modify layouts):
[root@node192 jiushan-dir1]# cephfs ./10M show_layout
WARNING: This tool is deprecated. Use the layout.* xattrs to query and modify layouts.
layout.data_pool: 3
layout.object_size: 4194304
layout.stripe_unit: 4194304
layout.stripe_count: 1
[root@node192 jiushan-dir1]# cephfs ./10M show_location
WARNING: This tool is deprecated. Use the layout.* xattrs to query and modify layouts.
location.file_offset: 0
location.object_offset: 0
location.object_no: 0
location.object_size: 4194304
location.object_name: 10000001458.00000000
location.block_offset: 0
location.block_size: 4194304
location.osd: 0
[root@node192 jiushan-dir1]# cephfs ./10M map
WARNING: This tool is deprecated. Use the layout.* xattrs to query and modify layouts.
FILE OFFSET    OBJECT                  OFFSET    LENGTH     OSD
0              10000001458.00000000    0         4194304    0
4194304        10000001458.00000001    0         4194304    5
8388608        10000001458.00000002    0         4194304    2
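Object placement can also be inspected from the RADOS side; the generic forms, matching the session below, are:
rados -p {pool-name} ls
ceph osd map {pool-name} {object-name}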
[root@node248 ~]# rados -p data ls
100000003e9.00000000
100000003e9.00000006
100000003e9.00000002
100000003e9.00000008
100000003e9.00000001
100000003e9.00000005
100000003e9.00000004
100000003e9.00000009
100000003e9.00000003
100000003e9.00000007
[root@node248 ~]# ceph osd map data 100000003e9.00000007
osdmap e39 pool 'data' (1) object '100000003e9.00000007' -> pg 1.e5b260d4 (1.d4) -> up ([0,4], p0) acting ([0,4], p0)
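Here the object hashes to pg 1.d4, whose acting set is [0,4] with osd.0 as the primary (ceph pg map 1.d4 should report the same mapping for the PG itself). ceph osd tree shows which hosts those OSDs live on: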
[root@node ~]# ceph osd tree
ID WEIGHT   TYPE NAME         UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 18.13797 root default
-2  7.25519     host node253
 0  3.62759         osd.0          up  1.00000          1.00000
 2  3.62759         osd.2          up  1.00000          1.00000
-3  7.25519     host node250
 1  3.62759         osd.1          up  1.00000          1.00000
 3  3.62759         osd.3          up  1.00000          1.00000
-4  3.62759     host node248
 4  3.62759         osd.4          up  1.00000          1.00000
[root@node248 ~]# cd /var/lib/ceph/osd/ceph-4/current/
[root@node248 current]# ls 1.d4_head/
100000003e9.00000007__head_E5B260D4__1 __head_000000D4__1
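So the replica of pg 1.d4 on osd.4 stores object 100000003e9.00000007 as an ordinary file under its current/1.d4_head/ directory. A quick way to locate an object's backing file (a sketch, reusing the paths from the session above):
find /var/lib/ceph/osd/ceph-4/current/1.d4_head -name '100000003e9.00000007*'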
Directory layout binding and stripe settings
To see which pool a directory is bound to:
getfattr -n ceph.dir.layout.pool /ceph
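The output looks something like this (a sketch; pool-rule-c2 is simply the pool name reused from the examples further down):
# file: ceph
ceph.dir.layout.pool="pool-rule-c2"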
To view a file's stripe layout:
getfattr -n ceph.file.layout file1
# file: file1
ceph.file.layout="stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=pool-rule-c2"
where pool is the storage pool the file's objects are stored in.
To view all of a directory's layout information:
getfattr -n ceph.dir.layout nfs-1
# file: nfs-1
ceph.dir.layout="stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=pool-rule-c2"
Setting layouts with setfattr
[root@ceph226 ceph]# getfattr -n ceph.dir.layout nfs-2
nfs-2: ceph.dir.layout: No such attribute
[root@ceph226 ceph]# setfattr -n ceph.dir.layout.pool -v pool-rule-c2 nfs-2
[root@ceph226 ceph]# getfattr -n ceph.dir.layout.pool nfs-2
# file: nfs-2
ceph.dir.layout.pool="pool-rule-c2"
[root@ceph226 ceph]# getfattr -n ceph.dir.layout nfs-2
# file: nfs-2
ceph.dir.layout="stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=pool-rule-c2"