Monday, October 3, 2016

Red Hat Ceph Storage 1.3 install

subscription-manager repos --enable=rhel-7-server-rhceph-1.3-calamari-rpms --enable=rhel-7-server-rhceph-1.3-installer-rpms --enable=rhel-7-server-rhceph-1.3-tools-rpms --enable=rhel-7-server-rhceph-1.3-mon-rpms --enable=rhel-7-server-rhceph-1.3-osd-rpms
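To confirm the RHCS repos were actually enabled, the repo list can be checked (a quick sanity check; the grep pattern just matches the repo ids above):

yum repolist enabled | grep rhceph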


setenforce 0
systemctl disable firewalld
systemctl stop firewalld
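Disabling firewalld is the quick route; if the firewall has to stay on, opening just the Ceph ports should also work. A sketch assuming the default ports (6789/tcp for monitors, 6800-7300/tcp for OSDs, 7480/tcp for the civetweb RGW used below):

firewall-cmd --zone=public --add-port=6789/tcp --permanent       # monitor nodes
firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent  # osd nodes
firewall-cmd --zone=public --add-port=7480/tcp --permanent       # rgw nodes
firewall-cmd --reload
# the Calamari admin node would need extra ports as well (e.g. 80 and the salt 4505-4506 range)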


sudo yum install yum-utils -y

sudo yum-config-manager --disable epel


yum install ntp -y

systemctl enable ntpd
systemctl restart ntpd

ntpq -p
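Besides ntpq -p, systemd's timedatectl can confirm the clock is actually being synchronized (a minimal check, assuming RHEL 7's default output):

timedatectl | grep -i ntp    # expect "NTP synchronized: yes"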


useradd ceph
passwd ceph

(enter the ceph user's password when prompted)



cat << EOF >/etc/sudoers.d/ceph
ceph ALL = (root) NOPASSWD:ALL
Defaults:ceph !requiretty
EOF

sudo chmod 0440 /etc/sudoers.d/ceph
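To verify the sudoers drop-in took effect, the ceph user's privileges can be listed as root (a quick check, not part of the original steps):

sudo -l -U ceph    # should show (root) NOPASSWD: ALL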

su - ceph


ssh-keygen

ssh-copy-id ceph@cephmon01
ssh-copy-id ceph@cephmon02
ssh-copy-id ceph@radosgw01
ssh-copy-id ceph@radosgw02
ssh-copy-id ceph@cephosd01
ssh-copy-id ceph@cephosd02
ssh-copy-id ceph@cephosd03
ssh-copy-id ceph@cephosd04
ssh-copy-id ceph@cephosd05
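The nine ssh-copy-id calls above can also be collapsed into one loop (same host names):

for h in cephmon01 cephmon02 radosgw01 radosgw02 cephosd01 cephosd02 cephosd03 cephosd04 cephosd05; do
    ssh-copy-id ceph@$h
done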



vi ~/.ssh/config

Host radosgw01
   Hostname radosgw01
   User ceph
Host radosgw02
   Hostname radosgw02
   User ceph
Host cephmon01
   Hostname cephmon01
   User ceph
Host cephmon02
   Hostname cephmon02
   User ceph
Host cephosd01
   Hostname cephosd01
   User ceph
Host cephosd02
   Hostname cephosd02
   User ceph
Host cephosd03
   Hostname cephosd03
   User ceph
Host cephosd04
   Hostname cephosd04
   User ceph
Host cephosd05
   Hostname cephosd05
   User ceph


chmod 600 ~/.ssh/config
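A quick loop can confirm passwordless SSH works to every node before ceph-deploy is run (same host list as above):

for h in cephmon01 cephmon02 radosgw01 radosgw02 cephosd01 cephosd02 cephosd03 cephosd04 cephosd05; do
    ssh $h hostname
done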

sudo yum install ceph-deploy calamari-server calamari-clients -y


mkdir ~/ceph-config
cd ~/ceph-config

ceph-deploy new cephmon01 cephmon02
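ceph-deploy new should leave a ceph.conf, a monitor keyring, and a ceph-deploy log in the working directory; listing it is a quick way to confirm before editing the config:

ls ~/ceph-config    # expect ceph.conf, ceph.mon.keyring and a ceph-deploy log file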



Edit ~/ceph-config/ceph.conf generated by ceph-deploy new ([global] section):

[global]
fsid = 4652d0b0-5c80-4fb0-9dea-5bb049c5a735
mon_initial_members = cephmon01, cephmon02
mon_host = 10.1.0.236,10.1.0.237
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

public_network = 10.1.0.0/24
cluster_network = 192.168.200.0/24

osd_journal_size = 10000
osd_pool_default_size = 3
osd_pool_default_min_size = 2

osd_crush_chooseleaf_type = 1
osd_crush_update_on_start = false
max_open_files = 131072
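If ceph.conf is edited again after the nodes have been deployed, the updated file can be pushed back out from the admin node (a sketch; the host list mirrors the cluster above):

ceph-deploy --overwrite-conf config push cephmon01 cephmon02 radosgw01 radosgw02 cephosd01 cephosd02 cephosd03 cephosd04 cephosd05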



ceph-deploy install --mon cephmon01 cephmon02
ceph-deploy install --rgw radosgw01 radosgw02
ceph-deploy install --osd cephosd01 cephosd02 cephosd03 cephosd04 cephosd05



/etc/hosts entries on all nodes:

10.1.0.232 radosgw01
10.1.0.233 radosgw02

10.1.0.236 cephmon01
10.1.0.237 cephmon02

10.1.0.240 cephosd01
10.1.0.241 cephosd02
10.1.0.242 cephosd03
10.1.0.243 cephosd04
10.1.0.244 cephosd05
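Name resolution for every host can be verified with getent before running ceph-deploy (a small check, assuming the entries above are in /etc/hosts or DNS):

for h in radosgw01 radosgw02 cephmon01 cephmon02 cephosd01 cephosd02 cephosd03 cephosd04 cephosd05; do
    getent hosts $h
done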



ceph-deploy mon create-initial
ceph-deploy calamari connect --master cephadmin.time-gate.com cephmon01 cephmon02
ceph-deploy calamari connect --master cephadmin.time-gate.com cephosd01 cephosd02 cephosd03

 


ceph-deploy disk zap cephosd01:vdb cephosd02:vdb cephosd03:vdb cephosd04:vdb cephosd05:vdb

ceph-deploy osd prepare cephosd01:vdb cephosd02:vdb cephosd03:vdb cephosd04:vdb cephosd05:vdb

ceph-deploy osd activate cephosd01:vdb1:vdb2 cephosd02:vdb1:vdb2 cephosd03:vdb1:vdb2 cephosd04:vdb1:vdb2 cephosd05:vdb1:vdb2

ceph-deploy install --cli cephadmin
ceph-deploy admin cephadmin

sudo chmod +r /etc/ceph/ceph.client.admin.keyring

ceph quorum_status --format json-pretty
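Once the admin keyring is readable, overall cluster state can be checked as well (standard status commands, not specific to this setup):

ceph -s          # health, mon quorum, osd count
ceph osd tree    # osd placement in the CRUSH hierarchy
ceph df          # pool and cluster usage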




[ceph@cephadmin ceph-config]$ ceph-deploy rgw create radosgw01 radosgw02


http://10.1.0.232:7480/
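A quick curl against the civetweb port is an easy reachability check (the gateway should answer an anonymous request with a small XML body):

curl http://10.1.0.232:7480/
# an empty ListAllMyBucketsResult XML document indicates the RGW is serving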


####  OSD Purge
ceph-deploy purge cephosd01 cephosd02 cephosd03 cephosd04 cephosd05
ceph-deploy purgedata cephosd01 cephosd02 cephosd03 cephosd04 cephosd05

#### FULL Purge
ceph-deploy purge cephosd01 cephosd02 cephosd03 cephosd04 cephosd05 radosgw01 radosgw02 cephmon01 cephmon02 cephadmin
ceph-deploy purgedata cephosd01 cephosd02 cephosd03 cephosd04 cephosd05 radosgw01 radosgw02 cephmon01 cephmon02 cephadmin
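After a full purge, the keyrings left in the working directory on the admin node are stale; ceph-deploy can discard them before a re-deploy (a suggestion, not in the original notes):

cd ~/ceph-config
ceph-deploy forgetkeys    # drop the old auth keyrings from the working directory
rm -f ceph.conf           # optional: regenerate the config with ceph-deploy new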





ceph osd lspools

ceph osd pool delete rbd rbd --yes-i-really-really-mean-it

ceph osd pool create rbd 128 128



ceph osd pool set rbd pg_num 128
ceph osd pool set rbd pgp_num 128
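The applied values can be read back to confirm the change (pg_num can only be increased on an existing pool, not reduced):

ceph osd pool get rbd pg_num
ceph osd pool get rbd pgp_num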

##### OSD removal / re-add


1. Run on each of the 5 OSD nodes:
/etc/init.d/ceph stop osd


2. On the admin node:

ceph osd pool delete rbd rbd --yes-i-really-really-mean-it


3. Run on each of the 5 OSD nodes:
/etc/init.d/ceph start osd

4. If an authentication error occurs when reinstalling, delete the keys and then activate again.

[ceph@cephadmin ceph-config]$ ceph auth del osd.0
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.1
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.2
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.3
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.4
updated
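The five deletions can also be done in one loop (same OSD ids as above):

for i in 0 1 2 3 4; do ceph auth del osd.$i; done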




After ceph osd down, out, rm, activate again:

sudo ceph osd crush remove osd.4
sudo ceph auth del osd.4
sudo ceph osd rm 4

ceph pg repair 0.2c

ceph osd out 1
ceph osd down 1
ceph osd rm 1



Bug workaround reference:
After the initial install, remove the rbd pool and recreate it.
https://access.redhat.com/webassets/avalon/d/Red_Hat_Ceph_Storage-1.3.1-Release_Notes-en-US/Red_Hat_Ceph_Storage-1.3.1-Release_Notes-en-US.pdf


### Bucket configuration


ceph osd crush add-bucket tg-center datacenter
ceph osd crush add-bucket mr01 room
ceph osd crush add-bucket row01 row
ceph osd crush add-bucket rack01 rack

ceph osd crush add-bucket cephosd01 host
ceph osd crush add-bucket cephosd02 host
ceph osd crush add-bucket cephosd03 host
ceph osd crush add-bucket cephosd04 host
ceph osd crush add-bucket cephosd05 host

ceph osd crush move tg-center root=default
ceph osd crush move mr01 datacenter=tg-center
ceph osd crush move row01 room=mr01
ceph osd crush move rack01 row=row01

ceph osd crush move cephosd01 rack=rack01
ceph osd crush move cephosd02 rack=rack01
ceph osd crush move cephosd03 rack=rack01
ceph osd crush move cephosd04 rack=rack01
ceph osd crush move cephosd05 rack=rack01
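After the buckets are created and moved, the resulting hierarchy can be inspected to confirm the hosts sit under rack01 (a plain status check):

ceph osd tree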


### How to edit the crushmap (on a monitor node)


ceph osd getcrushmap -o crushmap
crushtool -d crushmap -o crushmap.txt

#Devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4

Add the device entries above (one per OSD) if they are missing.


After editing the crushmap:

crushtool -c crushmap.txt -o crushmap

ceph osd setcrushmap -i crushmap


ceph osd crush set osd.0 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd01
ceph osd crush set osd.1 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd02
ceph osd crush set osd.2 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd03
ceph osd crush set osd.3 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd04
ceph osd crush set osd.4 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd05
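The edited map can also be dry-run with crushtool to see whether a 3-replica rule still maps PGs across the five hosts (a sanity check, assuming the compiled map from the steps above is in ./crushmap):

crushtool -i crushmap --test --num-rep 3 --show-mappings | head
ceph osd tree    # confirm the weights and locations set above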