Ceph installation commands
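- A minimal single-node bootstrap sketch; it assumes the Ceph repository is already configured and that node01's address is 192.168.1.101 (an assumption, consistent with the node02/node03 addresses used later):
dnf install -y cephadm
cephadm bootstrap --mon-ip 192.168.1.101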
Ceph Dashboard is now available at:
URL: https://node01:8443/
User: admin
Password: 5p176ukk0r
Enabling client.admin keyring and conf on hosts with "admin" label
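- Before additional hosts can be added (section 1), the cluster's public SSH key has to be installed on them; a sketch assuming root SSH access to node02 and node03:
ssh-copy-id -f -i /etc/ceph/ceph.pub root@node02
ssh-copy-id -f -i /etc/ceph/ceph.pub root@node03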
1. Host management commands
ceph orch host add node02 192.168.1.102
ceph orch host add node03 192.168.1.103 --labels osd,mon
ceph orch host ls
ceph orch host ls --format json-pretty
ceph orch ps --host node02
ceph orch ps
ceph orch device ls node02
ceph orch device ls node02 --detail
ceph orch host maintenance enter node02
ceph orch host maintenance exit node02
ceph orch host label add node02 osd
ceph orch host label rm node02 mon
ceph orch host ls --host node02
ceph orch upgrade start --image quay.io/ceph/ceph:v20.2.0 --hosts node02
ceph orch upgrade start --image quay.io/ceph/ceph:v20.2.0
ceph orch host maintenance enter node02
ceph orch host rm node02
cephadm rm-cluster --fsid $(ceph fsid) --force
ceph orch host rm node02 --force
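- Hosts can also be added declaratively from a spec file; a sketch reusing the addresses and labels above (hosts.yaml is a hypothetical filename):
cat > hosts.yaml <<EOF
service_type: host
hostname: node02
addr: 192.168.1.102
labels:
  - osd
---
service_type: host
hostname: node03
addr: 192.168.1.103
labels:
  - osd
  - mon
EOF
ceph orch apply -i hosts.yaml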
2. OSD management commands
ceph orch daemon add osd node01:/dev/nvme0n2
ceph orch apply osd --all-available-devices --placement="node01,node02"
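- OSDs can also be described with an OSD service spec instead of --all-available-devices; a sketch (the filename osd_spec.yaml and the service_id are assumptions):
cat > osd_spec.yaml <<EOF
service_type: osd
service_id: default_drive_group
placement:
  hosts:
    - node01
    - node02
data_devices:
  all: true
EOF
ceph orch apply -i osd_spec.yaml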
ceph osd ls
ceph osd tree
ceph osd status
ceph osd info osd.0
ceph osd perf
ceph osd df
ceph osd df --format json-pretty
ceph orch ps --daemon-type osd
ceph orch device ls node01 --detail | grep -A5 "osd."
ceph osd out osd.0
ceph osd out osd.0 osd.1
ceph orch daemon stop osd.0
ceph orch daemon start osd.0
ceph orch daemon restart osd.0
ceph orch daemon restart osd --host node01
ceph osd in osd.0
ceph osd crush set osd.0 1.0 host=node01
ceph osd crush tree
ceph osd out osd.0
ceph orch daemon stop osd.0
ceph osd rm osd.0
ceph auth del osd.0
ceph osd crush rm osd.0
ceph osd purge osd.0 --yes-i-really-mean-it
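- The orchestrator can also drain and remove an OSD in one step instead of the manual out/stop/rm/crush-rm sequence above; --replace keeps the OSD id reserved for a replacement disk:
ceph orch osd rm 0 --replace
ceph orch osd rm status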
cephadm logs --name osd.0
ceph tell osd.0 version
ceph health detail
3. Storage pool commands
ceph osd pool create rbd 128 128
rbd pool init rbd
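- Replica count and PG autoscaling on the new pool can be checked and adjusted, for example:
ceph osd pool get rbd size
ceph osd pool set rbd size 3
ceph osd pool set rbd pg_autoscale_mode on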
3.1 RBD block storage
rbd create demo.img --size 10G
rbd create pool1/test.img --size 20G
rbd create demo2.img --size 5G --object-size 4M --features layering
rbd ls
rbd ls pool1
rbd info demo.img
rbd info pool1/test.img
rbd du demo.img
rbd resize demo.img --size 15G
rbd resize demo.img --size 8G --allow-shrink
rbd info demo.img
rbd rm demo.img
rbd rm pool1/test.img
rbd rm demo.img --force
rbd snap create demo.img@snap1
rbd snap create demo.img@$(date +%Y%m%d_%H%M%S)
rbd snap ls demo.img
rbd info demo.img@snap1
rbd snap rollback demo.img@snap1
rbd snap rm demo.img@snap1
rbd snap purge demo.img
rbd snap protect demo.img@snap1
rbd snap unprotect demo.img@snap1
rbd clone demo.img@snap1 new_demo.img
rbd clone demo.img@snap1 pool1/clone_demo.img
rbd info new_demo.img | grep parent
rbd flatten new_demo.img
rbd info new_demo.img
rbd map demo.img
rbd map demo.img --name client.admin --keyring /etc/ceph/ceph.client.admin.keyring
rbd showmapped
mkfs.ext4 /dev/rbd0
mkdir -p /mnt/rbd
mount /dev/rbd0 /mnt/rbd
umount /mnt/rbd
rbd unmap /dev/rbd0
rbd unmap /dev/rbd0 --force
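- For mappings that survive a reboot, rbdmap can be used; a sketch assuming the admin keyring and the rbd/demo.img image from above:
echo "rbd/demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" >> /etc/ceph/rbdmap
systemctl enable --now rbdmap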
3.1.1 iSCSI export
- Plain targetcli export of an RBD image over iSCSI (no ceph-iscsi gateway needed)
dnf install -y targetcli python3-kmod python3-pyparsing
rbd map demo.img
rbd showmapped
targetcli
/backstores/block create demo_iscsi /dev/rbd0
/iscsi create iqn.2026-03.com.ceph:demo-rbd
/iscsi/iqn.2026-03.com.ceph:demo-rbd/tpg1/luns create /backstores/block/demo_iscsi
/iscsi/iqn.2026-03.com.ceph:demo-rbd/tpg1 set attribute authentication=0 demo_mode_write_protect=0 generate_node_acls=1 cache_dynamic_acls=1
- With generate_node_acls=1 (demo mode) no per-initiator ACL is needed; to restrict access, create an ACL under tpg1/acls with the client's real IQN instead
saveconfig
exit
systemctl enable --now target
systemctl status target
- Connect from Windows using the iSCSI Initiator
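- A Linux client can log in with open-iscsi as well; a sketch assuming node01 (192.168.1.101) is the portal address:
dnf install -y iscsi-initiator-utils
iscsiadm -m discovery -t sendtargets -p 192.168.1.101
iscsiadm -m node -T iqn.2026-03.com.ceph:demo-rbd -p 192.168.1.101 --login
lsblk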

targetcli ls
targetcli sessions detail
systemctl stop target
systemctl restart target
rbd unmap /dev/rbd0
4. Object storage
- Ceph object storage depends on the RGW daemon and its dedicated pools; complete this basic deployment first
ceph osd pool create default.rgw.buckets.data 8 8
ceph osd pool create default.rgw.control 8 8
ceph osd pool create default.rgw.meta 8 8
ceph osd pool create default.rgw.log 8 8
ceph orch daemon add rgw node01:rgw.node01
ceph orch ls | grep rgw
ceph orch ps --daemon-type rgw
ceph orch daemon restart rgw.node01
ceph orch daemon stop rgw.node01
ceph orch daemon rm rgw.node01
ceph config set client.rgw rgw_frontends "beast port=80"
ceph config set client.rgw rgw_dns_name s3.ceph.com
ceph orch daemon restart rgw.node01
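- Alternatively, the listen port can be set when deploying RGW through the orchestrator; a sketch (the service name myrgw is an assumption):
ceph orch apply rgw myrgw --placement="node01" --port=80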
radosgw-admin user create --uid="s3-user1" --display-name="S3 User 1"
radosgw-admin user info --uid="s3-user1"
radosgw-admin user list
radosgw-admin user stats --uid="s3-user1"
radosgw-admin key create --uid="s3-user1" --key-type="s3" --gen-access-key --gen-secret
radosgw-admin user modify --uid="s3-user1" --display-name="New S3 User 1"
radosgw-admin bucket rm --bucket="user1-bucket" --uid="s3-user1"
radosgw-admin user rm --uid="s3-user1"
- Create a bucket (buckets are created through the S3 API, e.g. with s3cmd below; radosgw-admin can list, inspect and remove buckets but cannot create them)
dnf install -y s3cmd
s3cmd --configure
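- s3cmd --configure writes ~/.s3cfg; the fields that matter for RGW look roughly like this (the endpoint and the key placeholders are assumptions; use the keys printed by radosgw-admin user create):
host_base = node01:80
host_bucket = node01:80
access_key = <ACCESS_KEY>
secret_key = <SECRET_KEY>
use_https = False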
s3cmd mb s3://test-bucket
radosgw-admin bucket list
radosgw-admin bucket stats --bucket="test-bucket"
s3cmd ls s3://test-bucket
radosgw-admin quota set --uid="s3-user1" --quota-scope=bucket --max-size=10G --max-objects=1000
radosgw-admin quota enable --uid="s3-user1" --quota-scope=bucket
radosgw-admin bucket rm --bucket="test-bucket" --uid="s3-user1"
s3cmd rm --recursive s3://test-bucket
radosgw-admin bucket rm --bucket="test-bucket" --uid="s3-user1"
- Object management
- Upload / download / delete objects
s3cmd put local-file.txt s3://test-bucket/remote-file.txt
s3cmd get s3://test-bucket/remote-file.txt local-file-download.txt
s3cmd rm s3://test-bucket/remote-file.txt
s3cmd rm --recursive s3://test-bucket/
s3cmd info s3://test-bucket/remote-file.txt
radosgw-admin object stat --bucket="test-bucket" --object="remote-file.txt"
radosgw-admin quota set --uid="s3-user1" --quota-scope=user --max-size=50G --max-objects=5000
radosgw-admin quota enable --uid="s3-user1" --quota-scope=user
s3cmd setacl s3://test-bucket --acl-public
s3cmd setacl s3://test-bucket --acl-private
s3cmd setacl s3://test-bucket --acl-grant=write:another-user-uid
cephadm logs --name rgw.node01
ceph osd pool stats default.rgw.buckets.data
radosgw-admin bucket sync status --bucket="test-bucket"