To use Ceph as the backend for a block storage service such as OpenStack Cinder, RBD has to be configured first.
RBD (RADOS Block Device) is also known as the Ceph Block Device.
Reference: https://docs.ceph.com/en/latest/rbd/rbd-openstack/
Before starting, I confirmed that the Ceph cluster built earlier (https://greencloud33.tistory.com/45) is in the HEALTH_OK state.
The current pool and disk status is as follows.
root@deploy:/home/ceph-cluster# ceph osd lspools
1 device_health_metrics
2 .rgw.root
3 default.rgw.log
4 default.rgw.control
5 default.rgw.meta
root@deploy:/home/ceph-cluster# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 838 GiB 828 GiB 956 MiB 9.9 GiB 1.19
TOTAL 838 GiB 828 GiB 956 MiB 9.9 GiB 1.19
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 262 GiB
.rgw.root 2 32 1.3 KiB 4 768 KiB 0 262 GiB
default.rgw.log 3 32 3.4 KiB 207 6 MiB 0 262 GiB
default.rgw.control 4 32 0 B 8 0 B 0 262 GiB
default.rgw.meta 5 8 0 B 0 0 B 0 262 GiB
Using the ceph CLI on the deploy server
This step has nothing to do with RBD itself, but I did it for convenience: installing the Ceph client on the deploy server makes it possible to query and manage the cluster from the CLI there.
root@deploy:/home/ceph-cluster# ceph-deploy install ceph-client
With only the package installed, running ceph -s on the deploy server fails, because the client cannot authenticate to the cluster as the Ceph admin user. To fix this, copy ceph.client.admin.keyring into /etc/ceph, the path listed in the error.
root@deploy:/home/ceph-cluster# ceph -s
2022-03-01T17:55:30.576+0900 7fa8edfb6700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2022-03-01T17:55:30.576+0900 7fa8edfb6700 -1 AuthRegistry(0x7fa8e80590e0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
2022-03-01T17:55:30.576+0900 7fa8ecd54700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2022-03-01T17:55:30.576+0900 7fa8ecd54700 -1 AuthRegistry(0x7fa8e805b698) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
2022-03-01T17:55:30.576+0900 7fa8ecd54700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2022-03-01T17:55:30.576+0900 7fa8ecd54700 -1 AuthRegistry(0x7fa8ecd53130) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[errno 2] RADOS object not found (error connecting to the cluster)
root@deploy:/home/ceph-cluster# cp -a ceph.client.admin.keyring /etc/ceph
Now the cluster can be queried from the deploy server as well.
root@deploy:/home/ceph-cluster# ceph -s
cluster:
id: 4ec23dde-416c-4a0b-8c6d-6d10a960b090
health: HEALTH_OK
services:
mon: 3 daemons, quorum wglee-ceph-001,wglee-ceph-002,wglee-ceph-003 (age 4d)
mgr: wglee-ceph-001(active, since 4d), standbys: wglee-ceph-002, wglee-ceph-003
osd: 9 osds: 9 up (since 4d), 9 in (since 4d)
rgw: 1 daemon active (wglee-ceph-001)
task status:
data:
pools: 5 pools, 105 pgs
objects: 187 objects, 4.7 KiB
usage: 9.2 GiB used, 829 GiB / 838 GiB avail
pgs: 105 active+clean
Configuring ceph.conf
Before creating the pools, I set the OSD defaults and the RBD cache options in ceph.conf.
According to the comments in the sample ceph.conf, the recommended value for osd pool default pg num is calculated as:
(number of OSDs * 100) / replica count
I did not follow that formula exactly, but settled on 320 as an approximation; 320 is the value a PG calculator suggested.
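As a rough sanity check, plugging in this cluster's numbers (a sketch assuming the 9 OSDs reported by ceph -s and the replica size of 3 set in the config below):

# rule of thumb: (number of OSDs * 100) / replica count
echo $(( 9 * 100 / 3 ))    # prints 300; the PG calculator's 320 is in the same ballpark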
###config for ceph osds
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 320
osd pool default pgp num = 320
[client]
rbd cache = true
rbd cache size = 33554432 # (32MiB)
rbd cache max dirty = 20971520 # (20MiB)
rbd cache target dirty = 10485760 # (10MiB)
rbd cache max dirty age = 1.0
rbd cache writethrough until flush = true
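The ceph.conf edited here lives in the ceph-deploy working directory, so for the new defaults and cache settings to actually take effect the file also has to be distributed to the cluster nodes. A minimal sketch, assuming the same hostnames used for the monitors in this cluster:

ceph-deploy --overwrite-conf config push wglee-ceph-001 wglee-ceph-002 wglee-ceph-003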
Disabling the PG autoscaler
The option that automatically calculates and assigns PG counts was enabled. I wanted to assign PGs per pool based on expected usage, so I disabled it.
root@deploy:/home/ceph-cluster# ceph osd pool set device_health_metrics pg_autoscale_mode off
set pool 1 pg_autoscale_mode to off
root@deploy:/home/ceph-cluster# ceph osd pool set .rgw.root pg_autoscale_mode off
set pool 2 pg_autoscale_mode to off
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.log pg_autoscale_mode off
set pool 3 pg_autoscale_mode to off
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.control pg_autoscale_mode off
set pool 4 pg_autoscale_mode to off
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.meta pg_autoscale_mode off
set pool 5 pg_autoscale_mode to off
To turn the option off for pools created in the future as well, set the global default:
root@deploy:/home/ceph-cluster# ceph config set global osd_pool_default_pg_autoscale_mode off
root@deploy:/home/ceph-cluster# ceph config dump
WHO MASK LEVEL OPTION VALUE RO
global advanced osd_pool_default_pg_autoscale_mode off
mon advanced auth_allow_insecure_global_id_reclaim false
mon advanced mon_allow_pool_delete false
Autoscaling is now off for every pool.
root@deploy:/home/ceph-cluster# ceph osd pool autoscale-status
POOL SIZE TARGET SIZE RATE RAW CAPACITY RATIO TARGET RATIO EFFECTIVE RATIO BIAS PG_NUM NEW PG_NUM AUTOSCALE
device_health_metrics 0 3.0 838.1G 0.0000 1.0 1 off
.rgw.root 1289 3.0 838.1G 0.0000 1.0 16 off
default.rgw.log 3520 3.0 838.1G 0.0000 1.0 16 off
default.rgw.control 0 3.0 838.1G 0.0000 1.0 16 off
default.rgw.meta 0 3.0 838.1G 0.0000 4.0 16 off
Creating the RBD pools
https://docs.ceph.com/en/nautilus/rados/operations/pools/#create-a-pool
Now the actual RBD setup begins. I create the following pools and then assign pg_num manually.
The role of each pool is as follows:
- volumes pool: used by OpenStack Cinder; stores block storage volumes and their snapshots.
- images pool: used by OpenStack Glance; holds the OS images used to create VMs.
  Note: when keeping VM OS images in Ceph, the raw format is recommended over QCOW2 (see the conversion sketch just after this list).
- vms pool: stores the boot disks of OpenStack VMs. Since the Havana release, a VM's boot disk can be created by talking to Ceph directly instead of going through Cinder, which is said to cut down on maintenance hassle and to help with live migration.
- backups pool: Cinder can back up block storage volumes, and this appears to be the pool that stores those backups.
I am not planning to set up cinder-backup yet, but I created the pool anyway.
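On the raw-vs-QCOW2 note above: the upstream OpenStack guide converts QCOW2 cloud images to raw before uploading them to Glance, roughly like this (the file names here are placeholders):

qemu-img convert -f qcow2 -O raw my-image.qcow2 my-image.raw

With that noted, the four pools are created: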
root@deploy:/home/ceph-cluster# ceph osd pool create volumes
pool 'volumes' created
root@deploy:/home/ceph-cluster# ceph osd pool create images
pool 'images' created
root@deploy:/home/ceph-cluster# ceph osd pool create backups
pool 'backups' created
root@deploy:/home/ceph-cluster# ceph osd pool create vms
pool 'vms' created
Applying pg_num
Honestly, I am not sure I got this part exactly right. I used the Red Hat PG calculator at https://access.redhat.com/labs/cephpgc/, set 10 OSDs and 3 replicas, entered the pools I need, and applied the values it produced.
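For reference, the per-pool pg_num targets applied by the commands below add up to the 320 PGs seen later in ceph -s:

device_health_metrics   16
.rgw.root               16
default.rgw.log         16
default.rgw.control     16
default.rgw.meta        16
volumes                128
images                  16
backups                 32
vms                     64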
root@deploy:/home/ceph-cluster# ceph osd pool set device_health_metrics pg_num 16
root@deploy:/home/ceph-cluster# ceph osd pool set device_health_metrics pgp_num 16
set pool 1 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool get .rgw.root pgp_num
pgp_num: 16
root@deploy:/home/ceph-cluster# ceph osd pool get .rgw.root pg_num
pg_num: 16
root@deploy:/home/ceph-cluster# ceph osd pool set .rgw.root pg_num 16
root@deploy:/home/ceph-cluster# ceph osd pool set .rgw.root pgp_num 16
set pool 2 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.log pg_num 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.log pgp_num 16
set pool 3 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.control pg_num 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.control pgp_num 16
set pool 4 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.meta pg_num 16
root@deploy:/home/ceph-cluster# ceph osd pool set default.rgw.meta pgp_num 16
set pool 5 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set backups pg_num 32
root@deploy:/home/ceph-cluster# ceph osd pool set backups pgp_num 32
set pool 12 pgp_num to 32
root@deploy:/home/ceph-cluster# ceph osd pool set volumes pg_num 128
set pool 10 pg_num to 128
root@deploy:/home/ceph-cluster# ceph osd pool set volumes pgp_num 128
set pool 10 pgp_num to 128
root@deploy:/home/ceph-cluster# ceph osd pool set images pg_num 16
set pool 11 pg_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set images pgp_num 16
set pool 11 pgp_num to 16
root@deploy:/home/ceph-cluster# ceph osd pool set vms pg_num 64
set pool 13 pg_num to 64
root@deploy:/home/ceph-cluster# ceph osd pool set vms pgp_num 64
set pool 13 pgp_num to 64
Watch the cluster status (watch ceph -s) and wait until every PG is active+clean. Eventually all 320 PGs reached active+clean.
Every 2.0s: ceph -s deploy: Sat May 21 17:36:31 2022
cluster:
id: 4ec23dde-416c-4a0b-8c6d-6d10a960b090
health: HEALTH_OK
services:
mon: 3 daemons, quorum wglee-ceph-001,wglee-ceph-002,wglee-ceph-003 (age 3h)
mgr: wglee-ceph-002(active, since 3h), standbys: wglee-ceph-001, wglee-ceph-003
osd: 9 osds: 9 up (since 3h), 9 in (since 2M)
rgw: 1 daemon active (wglee-ceph-001)
task status:
data:
pools: 9 pools, 320 pgs
objects: 219 objects, 4.7 KiB
usage: 10 GiB used, 828 GiB / 838 GiB avail
pgs: 320 active+clean
The ceph df output now shows the PGs distributed exactly as configured.
root@deploy:/home/ceph-cluster# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 838 GiB 828 GiB 1.1 GiB 10 GiB 1.20
TOTAL 838 GiB 828 GiB 1.1 GiB 10 GiB 1.20
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 16 0 B 0 0 B 0 262 GiB
.rgw.root 2 16 1.3 KiB 4 768 KiB 0 262 GiB
default.rgw.log 3 16 3.4 KiB 207 6 MiB 0 262 GiB
default.rgw.control 4 16 0 B 8 0 B 0 262 GiB
default.rgw.meta 5 16 0 B 0 0 B 0 262 GiB
volumes 10 128 0 B 0 0 B 0 262 GiB
images 11 16 0 B 0 0 B 0 262 GiB
backups 12 32 0 B 0 0 B 0 262 GiB
vms 13 64 0 B 0 0 B 0 262 GiB
RBD init
After creating a pool, you have to tell Ceph which application will use it. For RBD block devices this is done by initializing the pool, which also tags it with the rbd application:
root@deploy:/home/ceph-cluster# rbd pool init volumes
root@deploy:/home/ceph-cluster# rbd pool init images
root@deploy:/home/ceph-cluster# rbd pool init backups
root@deploy:/home/ceph-cluster# rbd pool init vms
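These are the pools that the OpenStack services will eventually point at. As a rough sketch following the upstream rbd-openstack guide (the backend section name, client user names, and the libvirt secret UUID below are placeholders, not values configured in this post):

# cinder.conf (backend section name is arbitrary)
[rbd-backend]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = <libvirt-secret-uuid>

# glance-api.conf
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance

# nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf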
Testing RBD image creation
Now that the pools are set up, I test whether resources are actually created properly.
The value passed to --size is in MiB; I created a 1 MiB image.
root@deploy:/home/ceph-cluster# rbd create --size 1 volumes/wglee-test
An RBD image named wglee-test now exists in the volumes pool, and the STORED figure for the volumes pool has increased to 70 B.
root@deploy:/home/ceph-cluster# rbd ls volumes
wglee-test
root@deploy:/home/ceph-cluster# rbd info volumes/wglee-test
rbd image 'wglee-test':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 6a5831cfe8f3
block_name_prefix: rbd_data.6a5831cfe8f3
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat May 21 17:50:32 2022
access_timestamp: Sat May 21 17:50:32 2022
modify_timestamp: Sat May 21 17:50:32 2022
root@deploy:/home/ceph-cluster# ceph df
...
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
...
volumes 10 128 70 B 5 576 KiB 0 262 GiB
images 11 16 19 B 1 192 KiB 0 262 GiB
backups 12 32 19 B 1 192 KiB 0 262 GiB
vms 13 64 19 B 1 192 KiB 0 262 GiB
Next, change the image size with rbd resize.
root@deploy:/home/ceph-cluster# rbd resize --size 5 volumes/wglee-test
Resizing image: 100% complete...done.
root@deploy:/home/ceph-cluster# rbd info volumes/wglee-test
rbd image 'wglee-test':
size 5 MiB in 2 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 6a5831cfe8f3
block_name_prefix: rbd_data.6a5831cfe8f3
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat May 21 17:50:32 2022
access_timestamp: Sat May 21 17:50:32 2022
modify_timestamp: Sat May 21 17:50:32 2022
Deleting the test image
After deleting it, ceph df shows the volumes pool usage dropping back down. Looks like it all works!
root@deploy:/home/ceph-cluster# rbd rm volumes/wglee-test
Removing image: 100% complete...done.
root@deploy:/home/ceph-cluster# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 838 GiB 828 GiB 1.1 GiB 10 GiB 1.21
TOTAL 838 GiB 828 GiB 1.1 GiB 10 GiB 1.21
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
...
volumes 10 128 19 B 3 192 KiB 0 262 GiB
...