Monday, October 3, 2016

Red Hat Ceph Storage 1.3 install

subscription-manager repos --enable=rhel-7-server-rhceph-1.3-calamari-rpms --enable=rhel-7-server-rhceph-1.3-installer-rpms --enable=rhel-7-server-rhceph-1.3-tools-rpms --enable=rhel-7-server-rhceph-1.3-mon-rpms --enable=rhel-7-server-rhceph-1.3-osd-rpms


setenforce 0
systemctl disable firewalld
systemctl stop firewalld
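
If disabling firewalld is not an option, the Ceph ports can be opened instead. A minimal sketch, assuming the default ports for each daemon; apply per node role:

firewall-cmd --zone=public --add-port=6789/tcp --permanent        # monitor
firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent   # OSD daemons
firewall-cmd --zone=public --add-port=7480/tcp --permanent        # radosgw (civetweb)
firewall-cmd --reload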


sudo yum install yum-utils -y

sudo yum-config-manager --disable epel


yum install ntp -y

systemctl enable ntpd
systemctl restart ntpd

ntpq -p


useradd ceph
passwd ceph

password



cat << EOF >/etc/sudoers.d/ceph
ceph ALL = (root) NOPASSWD:ALL
Defaults:ceph !requiretty
EOF

sudo chmod 0440 /etc/sudoers.d/ceph

su - ceph


ssh-keygen

ssh-copy-id ceph@cephmon01
ssh-copy-id ceph@cephmon02
ssh-copy-id ceph@radosgw01
ssh-copy-id ceph@radosgw02
ssh-copy-id ceph@cephosd01
ssh-copy-id ceph@cephosd02
ssh-copy-id ceph@cephosd03
ssh-copy-id ceph@cephosd04
ssh-copy-id ceph@cephosd05



vi ~/.ssh/config

Host radosgw01
   Hostname radosgw01
   User ceph
Host radosgw02
   Hostname radosgw02
   User ceph
Host cephmon01
   Hostname cephmon01
   User ceph
Host cephmon02
   Hostname cephmon02
   User ceph
Host cephosd01
   Hostname cephosd01
   User ceph
Host cephosd02
   Hostname cephosd02
   User ceph
Host cephosd03
   Hostname cephosd03
   User ceph
Host cephosd04
   Hostname cephosd04
   User ceph
Host cephosd05
   Hostname cephosd05
   User ceph


chmod 600 ~/.ssh/config

sudo yum install ceph-deploy calamari-server calamari-clients -y


mkdir ~/ceph-config
cd ~/ceph-config

ceph-deploy new cephmon01 cephmon02



Edit the generated ceph.conf:

[global]
fsid = 4652d0b0-5c80-4fb0-9dea-5bb049c5a735
mon_initial_members = cephmon01, cephmon02
mon_host = 10.1.0.236,10.1.0.237
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

public_network = 10.1.0.0/24
cluster_network = 192.168.200.0/24

journal_size = 10000
osd_pool_default_size = 3
osd_pool_default_min_size = 2

osd_crush_chooseleaf_type = 1
osd_crush_update_on_start = false
max_open_files = 131072
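
If ceph.conf is edited again after deployment, the updated file can be pushed out from the admin node. A sketch using the host names from this setup:

ceph-deploy --overwrite-conf config push cephmon01 cephmon02 cephosd01 cephosd02 cephosd03 cephosd04 cephosd05 radosgw01 radosgw02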



ceph-deploy install --mon cephmon01 cephmon02
ceph-deploy install --rgw radosgw01 radosgw02
ceph-deploy install --osd cephosd01 cephosd02 cephosd03 cephosd04 cephosd05



Add to /etc/hosts on every node:

10.1.0.232 radosgw01
10.1.0.233 radosgw02

10.1.0.236 cephmon01
10.1.0.237 cephmon02

10.1.0.240 cephosd01
10.1.0.241 cephosd02
10.1.0.242 cephosd03
10.1.0.243 cephosd04
10.1.0.244 cephosd05



ceph-deploy mon create-initial
ceph-deploy calamari connect --master cephadmin.time-gate.com cephmon01 cephmon02
ceph-deploy calamari connect --master cephadmin.time-gate.com cephosd01 cephosd02 cephosd03

 


ceph-deploy disk zap cephosd01:vdb cephosd02:vdb cephosd03:vdb cephosd04:vdb cephosd05:vdb

ceph-deploy osd prepare cephosd01:vdb cephosd02:vdb cephosd03:vdb cephosd04:vdb cephosd05:vdb
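
Preparing a whole device such as vdb creates a data partition (vdb1) and a co-located journal partition (vdb2), which is why the activate step below references vdb1:vdb2. The resulting layout can be checked from the admin node before activating:

ceph-deploy disk list cephosd01 cephosd02 cephosd03 cephosd04 cephosd05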

ceph-deploy osd activate cephosd01:vdb1:vdb2 cephosd02:vdb1:vdb2 cephosd03:vdb1:vdb2 cephosd04:vdb1:vdb2 cephosd05:vdb1:vdb2

ceph-deploy install --cli cephadmin
ceph-deploy admin cephadmin

sudo chmod +r /etc/ceph/ceph.client.admin.keyring

ceph quorum_status --format json-pretty




[ceph@cephadmin ceph-config]$ ceph-deploy rgw create radosgw01 radosgw02


http://10.1.0.232:7480/
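
A quick sanity check of the gateway: an anonymous request to the civetweb port should return an XML ListAllMyBucketsResult document (this only confirms the gateway answers, not full functionality):

curl http://10.1.0.232:7480/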


####  OSD Purge
ceph-deploy purge cephosd01 cephosd02 cephosd03 cephosd04 cephosd05
ceph-deploy purgedata cephosd01 cephosd02 cephosd03 cephosd04 cephosd05

#### FULL Purge
ceph-deploy purge cephosd01 cephosd02 cephosd03 cephosd04 cephosd05 radosgw01 radosgw02 cephmon01 cephmon02 cephadmin
ceph-deploy purgedata cephosd01 cephosd02 cephosd03 cephosd04 cephosd05 radosgw01 radosgw02 cephmon01 cephmon02 cephadmin





ceph osd lspools

ceph osd pool delete rbd rbd --yes-i-really-really-mean-it

ceph osd pool create rbd 128 128



ceph osd pool set rbd pg_num 128
ceph osd pool set rbd pgp_num 128
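
pgp_num must not be larger than pg_num; the values actually applied can be verified with:

ceph osd pool get rbd pg_num
ceph osd pool get rbd pgp_num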

##### OSD remove / re-add


1. Run on each of the 5 OSD nodes:
/etc/init.d/ceph stop osd


2. On the admin node:

ceph osd pool delete rbd rbd --yes-i-really-really-mean-it


3. Run on each of the 5 OSD nodes:
/etc/init.d/ceph start osd

4. If an authentication error occurs when reinstalling, delete the old keys and then activate again:

[ceph@cephadmin ceph-config]$ ceph auth del osd.0
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.1
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.2
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.3
updated
[ceph@cephadmin ceph-config]$ ceph auth del osd.4
updated




After ceph osd down / out / rm, activate again:

sudo ceph osd crush remove osd.4
sudo ceph auth del osd.4
sudo ceph osd rm 4

ceph pg repair 0.2c
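
ceph pg repair takes a placement group id; the ids that actually need repair can be listed first from the health output (0.2c above is simply the id reported at the time):

ceph health detail | grep inconsistent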

ceph osd out 1
ceph osd down 1
ceph osd rm 1



Bug workaround reference
After the initial install, remove the rbd pool and recreate it (see the release notes below).
https://access.redhat.com/webassets/avalon/d/Red_Hat_Ceph_Storage-1.3.1-Release_Notes-en-US/Red_Hat_Ceph_Storage-1.3.1-Release_Notes-en-US.pdf


### CRUSH bucket layout


ceph osd crush add-bucket tg-center datacenter
ceph osd crush add-bucket mr01 room
ceph osd crush add-bucket row01 row
ceph osd crush add-bucket rack01 rack

ceph osd crush add-bucket cephosd01 host
ceph osd crush add-bucket cephosd02 host
ceph osd crush add-bucket cephosd03 host
ceph osd crush add-bucket cephosd04 host
ceph osd crush add-bucket cephosd05 host

ceph osd crush move tg-center root=default
ceph osd crush move mr01 datacenter=tg-center
ceph osd crush move row01 room=mr01
ceph osd crush move rack01 row=row01

ceph osd crush move cephosd01 rack=rack01
ceph osd crush move cephosd02 rack=rack01
ceph osd crush move cephosd03 rack=rack01
ceph osd crush move cephosd04 rack=rack01
ceph osd crush move cephosd05 rack=rack01
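
After the moves, the resulting hierarchy (datacenter -> room -> row -> rack -> host) can be confirmed from the admin node:

ceph osd tree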


### Editing the crushmap (on a monitor node)


ceph osd getcrushmap -o crushmap
crushtool -d crushmap -o crushmap.txt

#Devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4

Add the device entries above to crushmap.txt.
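
For reference, each OSD host appears in the decompiled map as a bucket stanza along these lines (a sketch with assumed ids and weights, not this cluster's exact map):

host cephosd01 {
        id -2           # assigned automatically, do not reuse
        alg straw
        hash 0  # rjenkins1
        item osd.0 weight 1.000
}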


After editing the crushmap:

crushtool -c crushmap.txt -o crushmap

ceph osd setcrushmap -i crushmap


ceph osd crush set osd.0 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd01
ceph osd crush set osd.1 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd02
ceph osd crush set osd.2 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd03
ceph osd crush set osd.3 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd04
ceph osd crush set osd.4 1.0 root=default datacenter=tg-center room=mr01 row=row01 rack=rack01 host=cephosd05

Monday, September 26, 2016

RHEL 7 InfiniBand bonding + bridge

[root@openstack01 network-scripts]# cat ifcfg-ib0
CONNECTED_MODE=yes
TYPE=InfiniBand
BOOTPROTO=none
#DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=ib0
UUID=af984bff-db23-4c53-b234-2d52b6029fed
DEVICE=ib0
ONBOOT=yes
HWADDR=80:00:02:0A:FE:80:00:00:00:00:00:00:00:21:28:00:01:CF:56:7F
MTU=65520
PEERDNS=yes
PEERROUTES=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
MASTER=bond1
SLAVE=yes
USERCTL=no
NM_CONTROLLED=no


[root@openstack01 network-scripts]# cat ifcfg-ib1
CONNECTED_MODE=yes
TYPE=InfiniBand
BOOTPROTO=none
#DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=ib1
UUID=0a441295-38bb-4407-9f6a-e36a9cc854ef
DEVICE=ib1
ONBOOT=yes
HWADDR=80:00:02:0B:FE:80:00:00:00:00:00:00:00:21:28:00:01:CF:56:80
MTU=65520
PEERDNS=yes
PEERROUTES=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
MASTER=bond1
SLAVE=yes
USERCTL=no
NM_CONTROLLED=no
[root@openstack01 network-scripts]#

[root@openstack01 network-scripts]# cat ifcfg-bond1 
NAME=bond1
DEVICE=bond1
BONDING_MASTER=yes
TYPE=Bond
IPADDR=192.168.10.4
NETMASK=255.255.255.0
ONBOOT=yes
BOOTPROTO=none
BONDING_OPTS="mode=active-backup miimon=100"
NM_CONTROLLED=no
[root@openstack01 network-scripts]# 

[root@openstack01 network-scripts]# cat ifcfg-virbr1
DEVICE="virbr1"
TYPE=BRIDGE
ONBOOT=yes
BOOTPROTO=none
IPADDR="192.168.10.4"
PREFIX=24
[root@openstack01 network-scripts]# 
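
After restarting the network service, the bond's active-backup state and addressing can be verified with:

cat /proc/net/bonding/bond1
ip addr show bond1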




Thursday, September 22, 2016

Pine64 Ubuntu 16.04.1 Xenial Ceph Storage Installation Hands-on











1.Recommended OS for Ceph Storage

http://docs.ceph.com/docs/jewel/start/os-recommendations/
http://ceph.com/releases/v10-2-0-jewel-released/

DISTRO COMPATIBILITY

Starting with Infernalis, we have dropped support for many older distributions so that we can move to a newer compiler toolchain (e.g., C++11). Although it is still possible to build Ceph on older distributions by installing backported development tools, we are not building and publishing release packages for ceph.com.
We now build packages for the following distributions and architectures:
  • x86_64:
      - CentOS 7.x. We have dropped support for CentOS 6 (and other RHEL 6 derivatives, like Scientific Linux 6).
      - Debian Jessie 8.x. Debian Wheezy 7.x’s g++ has incomplete support for C++11 (and no systemd).
      - Ubuntu Xenial 16.04 and Trusty 14.04. Ubuntu Precise 12.04 is no longer supported.
      - Fedora 22 or later.
  • aarch64 / arm64:
      - Ubuntu Xenial 16.04.




OS check: Ubuntu Xenial (16.04.1 LTS)
ubuntu@admin:~$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 16.04.1 LTS
Release: 16.04
Codename: xenial




2.Pine64 Ceph storage components

5 x pine64 for storage node
1 x Raspberry Pi 3 for admin / monitor
1 x Netgear GS108E ethernet switch
5 x 16G MicroSD for OS
5 x 32G MicroSD for Storage
5 x USB3.0 MicroSD reader
1 x PC power supply 5V 30A







3.Test environment layout


Node Name   IP Address   Functionality
ceph01      10.0.1.21    Ceph OSD node
ceph02      10.0.1.22    Ceph OSD node
ceph03      10.0.1.23    Ceph OSD node
ceph04      10.0.1.24    Ceph OSD node, monitor
ceph05      10.0.1.25    Ceph OSD node, monitor
admin       10.0.1.26    Admin




4.Change the hostname

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
ubuntu@localhost:~$ sudo hostnamectl set-hostname ceph01

5.Remove unnecessary packages

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
sudo apt-get remove --purge libreoffice* -y
sudo apt-get remove --purge gimp* -y
sudo apt-get remove --purge firefox* -y
sudo apt-get clean
sudo apt-get autoremove

6.Install required packages

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
~$ sudo apt-get install btrfs-tools -y

7.Switch the system boot to text mode (runlevel 3)

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05
ubuntu@ceph05:~$ sudo systemctl set-default multi-user.target

8.Register /etc/hosts entries

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
ubuntu@admin:~$ cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 kevin-desktop


# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters


10.0.1.21 ceph01
10.0.1.22 ceph02
10.0.1.23 ceph03
10.0.1.24 ceph04
10.0.1.25 ceph05
10.0.1.26 admin

9.Create the ceph user

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
ubuntu@admin:~$ sudo useradd -b /var/lib -m ceph -s /bin/bash
ubuntu@admin:~$ sudo passwd ceph
Enter new UNIX password:
Retype new UNIX password:
passwd: password updated successfully
Password: welcome1 is used on all nodes

10.Add the ceph user to sudoers and log in

Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
ubuntu@admin:~$ echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
ubuntu@admin:~$ sudo chmod 0440 /etc/sudoers.d/ceph



11.Copy the SSH key to each node for ceph user access

Target node: admin
When running ssh-keygen, do not enter a passphrase; just press Enter.
ubuntu@admin:~$ su - ceph
Password:

ceph@admin:~$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/var/lib/ceph/.ssh/id_rsa):
Created directory '/var/lib/ceph/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /var/lib/ceph/.ssh/id_rsa.
Your public key has been saved in /var/lib/ceph/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:4KIDqKg9y3gBODyjl6n3JG0ed8UG7O/djmE8kGZjOVM ceph@admin
The key's randomart image is:
+---[RSA 2048]----+
| |
| . |
|o . o E |
|=+ . o o + |
|+ooo. . S # |
|+.=o . B * |
|oo+.= . . . = |
|o++* o . . o = |
|oo++o . o.o |
+----[SHA256]-----+


ubuntu@admin:~$ ssh-copy-id ceph@ceph01
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/ubuntu/.ssh/id_rsa.pub"
The authenticity of host 'ceph01 (10.0.1.21)' can't be established.
ECDSA key fingerprint is SHA256:10+26IDg1MawZjIS6y8iDnzb/3majyh+C1mVyznGJ68.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
ceph@ceph01's password:

Now try logging into the machine, with: "ssh 'ceph@ceph01'"
and check to make sure that only the key(s) you wanted were added.

ceph@admin:~$ ssh-copy-id ceph@ceph02
...
ceph@admin:~$ ssh-copy-id ceph@ceph03
...
ceph@admin:~$ ssh-copy-id ceph@ceph04
...
ceph@admin:~$ ssh-copy-id ceph@ceph05
...

12.Register the Ceph repository

Target node: admin
ceph@admin:~$ wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -
OK
ceph@admin:~$ echo deb https://download.ceph.com/debian-jewel/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
deb https://download.ceph.com/debian-jewel/ xenial main

13.apt update

Target node: admin
ceph@admin:~$ sudo apt-get update -y && sudo apt-get upgrade -y

14.Install ceph-deploy

Target node: admin
ceph@admin:~$ sudo apt-get install ceph-deploy -y
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following NEW packages will be installed:
ceph-deploy
0 upgraded, 1 newly installed, 0 to remove and 1 not upgraded.
Need to get 0 B/96.3 kB of archives.
After this operation, 617 kB of additional disk space will be used.
Selecting previously unselected package ceph-deploy.
(Reading database ... 168830 files and directories currently installed.)
Preparing to unpack .../ceph-deploy_1.5.34_all.deb ...
Unpacking ceph-deploy (1.5.34) ...
Setting up ceph-deploy (1.5.34) ...

15.Create monitor nodes

Target node: admin
ceph@admin:~$ ceph-deploy new ceph04 ceph05
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.35): /usr/bin/ceph-deploy new ceph04 ceph05
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7654f378>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] ssh_copykey : True
[ceph_deploy.cli][INFO ] mon : ['ceph04', 'ceph05']
[ceph_deploy.cli][INFO ] func : <function new at 0x76533d70>
[ceph_deploy.cli][INFO ] public_network : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] cluster_network : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] fsid : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph04][DEBUG ] connected to host: admin
[ceph04][INFO ] Running command: ssh -CT -o BatchMode=yes ceph04
[ceph04][DEBUG ] connection detected need for sudo
[ceph04][DEBUG ] connected to host: ceph04
[ceph04][DEBUG ] detect platform information from remote host
[ceph04][DEBUG ] detect machine type
[ceph04][DEBUG ] find the location of an executable
[ceph04][INFO ] Running command: sudo /bin/ip link show
[ceph04][INFO ] Running command: sudo /bin/ip addr show
[ceph04][DEBUG ] IP addresses found: [u'10.0.1.24']
[ceph_deploy.new][DEBUG ] Resolving host ceph04
[ceph_deploy.new][DEBUG ] Monitor ceph04 at 10.0.1.24
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph05][DEBUG ] connected to host: admin
[ceph05][INFO ] Running command: ssh -CT -o BatchMode=yes ceph05
[ceph05][DEBUG ] connection detected need for sudo
[ceph05][DEBUG ] connected to host: ceph05
[ceph05][DEBUG ] detect platform information from remote host
[ceph05][DEBUG ] detect machine type
[ceph05][DEBUG ] find the location of an executable
[ceph05][INFO ] Running command: sudo /bin/ip link show
[ceph05][INFO ] Running command: sudo /bin/ip addr show
[ceph05][DEBUG ] IP addresses found: [u'10.0.1.25']
[ceph_deploy.new][DEBUG ] Resolving host ceph05
[ceph_deploy.new][DEBUG ] Monitor ceph05 at 10.0.1.25
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph04', 'ceph05']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['10.0.1.24', '10.0.1.25']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...

16.Edit ceph.conf

Target node: admin
ceph@admin:~$ vi ceph.conf
[global]
fsid = 7bf83b5c-18f3-496a-b1b3-f54316162a68
mon_initial_members = ceph04, ceph05
mon_host = 10.0.1.24,10.0.1.25
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2
public_network = 10.0.0.0/23
ceph@admin:~$

17.ceph install

Before installing, make sure no node other than admin is logged in as the ceph user; log out of any such sessions first.
Running the install while the ceph account is logged in on those nodes can cause errors.
Target node: admin
ceph@admin:~$ ceph-deploy install ceph01 ceph02 ceph03 ceph04 ceph05 admin
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy install ceph01 ceph02 ceph03 ceph04 ceph05 admin
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] testing : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x765786e8>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] dev_commit : None
[ceph_deploy.cli][INFO ] install_mds : False
[ceph_deploy.cli][INFO ] stable : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] adjust_repos : True
[ceph_deploy.cli][INFO ] func : <function install at 0x765baa30>
[ceph_deploy.cli][INFO ] install_all : False
[ceph_deploy.cli][INFO ] repo : False
[ceph_deploy.cli][INFO ] host : ['ceph01', 'ceph02', 'ceph03', 'ceph04', 'ceph05', 'admin']
[ceph_deploy.cli][INFO ] install_rgw : False
[ceph_deploy.cli][INFO ] install_tests : False
[ceph_deploy.cli][INFO ] repo_url : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] install_osd : False
[ceph_deploy.cli][INFO ] version_kind : stable
[ceph_deploy.cli][INFO ] install_common : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] dev : master
[ceph_deploy.cli][INFO ] local_mirror : None
[ceph_deploy.cli][INFO ] release : None
[ceph_deploy.cli][INFO ] install_mon : False
[ceph_deploy.cli][INFO ] gpg_url : None




Manual install/uninstall when ceph-deploy install fails
install :
Log in to the node as root and install with the following commands:

1. ~$ sudo wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -

2. ~$ echo deb https://download.ceph.com/debian-jewel/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list

3. ~$ sudo env DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q --no-install-recommends install -o Dpkg::Options::=--force-confnew ceph ceph-mds radosgw

uninstall :
To force-remove the packages on each node, run:
~$ sudo env DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q -f --force-yes remove --purge ceph ceph-mds ceph-common ceph-fs-common radosgw

18.Monitor node initial

Target node: admin
ceph@admin:~$ ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.35): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x76452918>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mon at 0x7654c8b0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  keyrings                      : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts ceph04 ceph05
[ceph_deploy.mon][DEBUG ] detecting platform for host ceph04 ...
[ceph04][DEBUG ] connection detected need for sudo
[ceph04][DEBUG ] connected to host: ceph04 
[ceph04][DEBUG ] detect platform information from remote host
[ceph04][DEBUG ] detect machine type
[ceph04][DEBUG ] find the location of an executable
...
[ceph05][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.ceph05.asok mon_status
[ceph05][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph05/keyring auth get-or-create client.admin osd allow * mds allow * mon allow *
[ceph05][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph05/keyring auth get-or-create client.bootstrap-mds mon allow profile bootstrap-mds
[ceph05][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph05/keyring auth get-or-create client.bootstrap-osd mon allow profile bootstrap-osd
[ceph05][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph05/keyring auth get-or-create client.bootstrap-rgw mon allow profile bootstrap-rgw
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO  ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO  ] Destroy temp directory /tmp/tmpSKaOCe


19.Configure OSDs

Target node: admin
ceph@admin:~$ ceph-deploy osd create ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 ceph05:sda1 --fs-type btrfs
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy osd create ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 --fs-type btrfs
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] disk : [('ceph01', '/dev/sda1', None), ('ceph02', '/dev/sda1', None), ('ceph03', '/dev/sda1', None), ('ceph04', '/dev/sda1', None)]
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x764ccda0>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : btrfs
[ceph_deploy.cli][INFO ] func : <function osd at 0x764b11b0>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph01:/dev/sda1: ceph02:/dev/sda1: ceph03:/dev/sda1: ceph04:/dev/sda1:
[ceph01][DEBUG ] connection detected need for sudo








20.OSD PREPARE

Target node: admin
ceph@admin:~$ ceph-deploy osd prepare ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 ceph05:sda1 --fs-type btrfs
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy osd prepare ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 --fs-type btrfs
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] disk : [('ceph01', '/dev/sda1', None), ('ceph02', '/dev/sda1', None), ('ceph03', '/dev/sda1', None), ('ceph04', '/dev/sda1', None)]
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : prepare
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7648bda0>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : btrfs
[ceph_deploy.cli][INFO ] func : <function osd at 0x764701b0>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph01:/dev/sda1: ceph02:/dev/sda1: ceph03:/dev/sda1: ceph04:/dev/sda1:
[ceph01][DEBUG ] connection detected need for sudo






21.OSD ACTIVATE

Target node: admin
ceph@admin:~$ ceph-deploy osd activate ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 ceph05:sda1
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy osd activate ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : activate
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x76519da0>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] func : <function osd at 0x764fe1b0>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] disk : [('ceph01', '/dev/sda1', None), ('ceph02', '/dev/sda1', None), ('ceph03', '/dev/sda1', None), ('ceph04', '/dev/sda1', None), ('ceph05', '/dev/sda1', None)]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks ceph01:/dev/sda1: ceph02:/dev/sda1: ceph03:/dev/sda1: ceph04:/dev/sda1:
[ceph01][DEBUG ] connection detected need for sudo
[ceph01][DEBUG ] connected to host: ceph01




22.Check Ceph status

Target node: admin
ceph@admin:~$ ceph quorum_status --format json-pretty


{
    "election_epoch": 4,
    "quorum": [
        0,
        1
    ],
    "quorum_names": [
        "ceph04",
        "ceph05"
    ],
    "quorum_leader_name": "ceph04",
    "monmap": {
        "epoch": 1,
        "fsid": "7bf83b5c-18f3-496a-b1b3-f54316162a68",
        "modified": "2016-09-22 19:22:24.810838",
        "created": "2016-09-22 19:22:24.810838",
        "mons": [
            {
                "rank": 0,
                "name": "ceph04",
                "addr": "10.0.1.24:6789\/0"
            },
            {
                "rank": 1,
                "name": "ceph05",
                "addr": "10.0.1.25:6789\/0"
            }
        ]
    }
}



ceph@admin:~$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring
ceph@admin:~$





ceph@admin:~$ ceph health
HEALTH_WARN too few PGs per OSD (25 < min 30)


ceph@admin:~$ ceph -s
cluster 7bf83b5c-18f3-496a-b1b3-f54316162a68
health HEALTH_OK
monmap e1: 2 mons at {ceph04=10.0.1.24:6789/0,ceph05=10.0.1.25:6789/0}
election epoch 4, quorum 0,1 ceph04,ceph05
osdmap e21: 4 osds: 4 up, 4 in
flags sortbitwise
pgmap v54: 64 pgs, 1 pools, 0 bytes data, 0 objects
20489 MB used, 100548 MB / 119 GB avail
64 active+clean

ceph@admin:~$ ceph osd pool set rbd pg_num 160
set pool 0 pg_num to 160


ceph@admin:~$ ceph osd pool set rbd pgp_num 160
set pool 0 pgp_num to 160


ceph@admin:~$ ceph -s
cluster 7bf83b5c-18f3-496a-b1b3-f54316162a68
health HEALTH_OK
monmap e1: 2 mons at {ceph04=10.0.1.24:6789/0,ceph05=10.0.1.25:6789/0}
election epoch 4, quorum 0,1 ceph04,ceph05
osdmap e30: 5 osds: 5 up, 5 in
flags sortbitwise
pgmap v90: 160 pgs, 1 pools, 0 bytes data, 0 objects
25617 MB used, 122 GB / 149 GB avail
160 active+clean


ceph@admin:~$ ceph mon stat
e1: 2 mons at {ceph04=10.0.1.24:6789/0,ceph05=10.0.1.25:6789/0}, election epoch 4, quorum 0,1 ceph04,ceph05
ceph@admin:~$ ceph mds stat
e1:
ceph@admin:~$ ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.14549 root default
-2 0.02910 host ceph01
0 0.02910 osd.0 up 1.00000 1.00000
-3 0.02910 host ceph02
1 0.02910 osd.1 up 1.00000 1.00000
-4 0.02910 host ceph03
2 0.02910 osd.2 up 1.00000 1.00000
-5 0.02910 host ceph04
3 0.02910 osd.3 up 1.00000 1.00000
-6 0.02910 host ceph05
4 0.02910 osd.4 up 1.00000 1.00000

See the documentation on PG settings:
http://docs.ceph.com/docs/master/rados/operations/placement-groups/
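
A rough rule of thumb from that page: total PGs per pool ≈ (number of OSDs × 100) / replica count, rounded up to the next power of two. With 5 OSDs and osd pool default size = 2 that works out to 5 × 100 / 2 = 250, i.e. 256; the 160 used above is enough to clear the too-few-PGs warning.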


ceph@admin:~$ ceph osd dump
epoch 30
fsid 7bf83b5c-18f3-496a-b1b3-f54316162a68
created 2016-09-22 19:22:55.416036
modified 2016-09-22 19:39:39.805911
flags sortbitwise
pool 0 'rbd' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 160 pgp_num 160 last_change 24 flags hashpspool stripe_width 0
max_osd 5
osd.0 up in weight 1 up_from 4 up_thru 29 down_at 0 last_clean_interval [0,0) 10.0.1.21:6800/10656 10.0.1.21:6801/10656 10.0.1.21:6802/10656 10.0.1.21:6803/10656 exists,up 8a03e5e1-5447-4612-a0c5-4fd7ef1d2b18
osd.1 up in weight 1 up_from 8 up_thru 29 down_at 0 last_clean_interval [0,0) 10.0.1.22:6800/8422 10.0.1.22:6801/8422 10.0.1.22:6802/8422 10.0.1.22:6803/8422 exists,up efcc116c-4ec1-4dd9-b7b3-72ef3a03a7e7
osd.2 up in weight 1 up_from 14 up_thru 29 down_at 0 last_clean_interval [0,0) 10.0.1.23:6800/8436 10.0.1.23:6801/8436 10.0.1.23:6802/8436 10.0.1.23:6803/8436 exists,up adf35a8b-2785-4ef3-a25e-1b3f21c15ea1
osd.3 up in weight 1 up_from 19 up_thru 29 down_at 0 last_clean_interval [0,0) 10.0.1.24:6800/9878 10.0.1.24:6801/9878 10.0.1.24:6802/9878 10.0.1.24:6803/9878 exists,up fac90ac0-b1b4-4653-96d6-961dedae9b0d
osd.4 up in weight 1 up_from 28 up_thru 29 down_at 0 last_clean_interval [0,0) 10.0.1.25:6800/11122 10.0.1.25:6801/11122 10.0.1.25:6802/11122 10.0.1.25:6803/11122 exists,up ca220f2d-7b54-4e37-b227-e03cad244614

23.RADOS TEST

Target node: admin or linux1


ceph@admin:~$ sudo dd if=/dev/zero of=/testfile.txt bs=1024 count=10000
10000+0 records in
10000+0 records out
10240000 bytes (10 MB, 9.8 MiB) copied, 0.21831 s, 46.9 MB/s


ceph@admin:~$ ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
119G 100546M 20492M 16.78
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 0 0 50272M 0



ceph@admin:~$ rados mkpool rdata
successfully created pool rdata


ceph@admin:~$ ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
119G 100546M 20492M 16.78
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 0 0 50272M 0
rdata 1 0 0 50272M 0


ceph@admin:~$ rados put test-object-1 /testfile.txt --pool=rdata


ceph@admin:~$ rados -p rdata ls
test-object-1


ceph@admin:~$ ceph osd map rdata test-object-1
osdmap e33 pool 'rdata' (1) object 'test-object-1' -> pg 1.74dc35e2 (1.2) -> up ([3,1], p3) acting ([3,1], p3)


ceph@admin:~$ rados rm test-object-1 --pool=rdata


ceph@admin:~$ rados -p rdata ls



24.BLOCK DEVICE TEST

Target node: admin
ceph@admin:~$ cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 kevin-desktop


# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters


10.0.1.26 admin
10.0.1.21 ceph01
10.0.1.22 ceph02
10.0.1.23 ceph03
10.0.1.24 ceph04
10.0.1.25 ceph05
10.0.0.195 kevdev
10.0.1.150 linux1 # VirtualBox node




ceph@admin:~$ ceph-deploy install root@linux1
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy install root@linux1
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
...
[ceph_deploy.cli][INFO ] local_mirror : None
[ceph_deploy.cli][INFO ] release : None
[ceph_deploy.cli][INFO ] install_mon : False
[ceph_deploy.cli][INFO ] gpg_url : None
[ceph_deploy.install][DEBUG ] Installing stable version jewel on cluster ceph hosts root@linux1
[ceph_deploy.install][DEBUG ] Detecting platform for host root@linux1 ...
root@linux1's password:
root@linux1's password:
[root@linux1][DEBUG ] connected to host: root@linux1

[root@linux1][DEBUG ] Package 1:ceph-10.2.2-0.el7.x86_64 already installed and latest version
[root@linux1][DEBUG ] Package 1:ceph-radosgw-10.2.2-0.el7.x86_64 already installed and latest version
[root@linux1][DEBUG ] Nothing to do
[root@linux1][INFO ] Running command: ceph --version
[root@linux1][DEBUG ] ceph version 10.2.2 (45107e21c568dd033c2f0a3107dec8f0b0e58374)
ceph@admin:~$

ceph@admin:~$ ceph-deploy admin root@linux1
[ceph_deploy.conf][DEBUG ] found configuration file at: /var/lib/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy admin root@linux1
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x764a78c8>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] client : ['root@linux1']
[ceph_deploy.cli][INFO ] func : <function admin at 0x76559070>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to root@linux1
root@linux1's password:
root@linux1's password:
[root@linux1][DEBUG ] connected to host: root@linux1
[root@linux1][DEBUG ] detect platform information from remote host
[root@linux1][DEBUG ] detect machine type
[root@linux1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
ceph@admin:~$


Target node: linux1
[ceph@linux1 ~]$ rbd create rbd_data --size 4096
2016-09-06 06:32:59.338104 7fdebb003d80 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
2016-09-06 06:32:59.338140 7fdebb003d80 -1 monclient(hunting): ERROR: missing keyring, cannot use cephx for authentication
2016-09-06 06:32:59.338142 7fdebb003d80 0 librados: client.admin initialization error (2) No such file or directory
rbd: couldn't connect to the cluster!


[ceph@linux1 ~]$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring


[ceph@linux1 ~]$ rbd create rbd_data --size 4096

rbd create failed because the admin keyring could not be read (a permission problem); running chmod adds read access.


[ceph@linux1 ceph]$ sudo rbd map rbd_data
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail" or so.
rbd: map failed: (6) No such device or address


[ceph@linux1 ceph]$ rbd feature disable rbd_data deep-flatten fast-diff object-map exclusive-lock


[ceph@linux1 ceph]$ sudo rbd map rbd_data
/dev/rbd0
rbd map failed with a feature set mismatch; after disabling the unsupported features, the map succeeds.
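
To avoid repeating the feature-disable step for every new image, the client's default feature set can be limited in ceph.conf (a sketch, not part of the original setup; the value 1 enables only layering, which the kernel rbd client supports). Existing images can be inspected with rbd info rbd_data.

[client]
rbd default features = 1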


[ceph@linux1 ceph]$ sudo mkfs.ext4 -m0 /dev/rbd/rbd/rbd_data
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
262144 inodes, 1048576 blocks
0 blocks (0.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=1073741824
32 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736


Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information:
done


[ceph@linux1 ceph]$ sudo mount /dev/rbd/rbd/rbd_data /mnt


[ceph@linux1 ceph]$ cd /mnt


[ceph@linux1 mnt]$ ls -al
total 24
drwxr-xr-x. 3 root root 4096 Sep 6 06:40 .
dr-xr-xr-x. 17 root root 4096 Sep 5 21:25 ..
drwx------. 2 root root 16384 Sep 6 06:40 lost+found


[root@linux1 mnt]# sudo dd if=/dev/zero of=testfile.txt bs=1024 count=10000
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.0359571 s, 285 MB/s


[root@linux1 mnt]# ls -al
total 10024
drwxr-xr-x. 3 root root 4096 Sep 6 13:58 .
dr-xr-xr-x. 18 root root 4096 Sep 6 06:48 ..
drwx------. 2 root root 16384 Sep 6 13:56 lost+found
-rw-r--r--. 1 root root 10240000 Sep 6 13:58 testfile.txt
[root@linux1 mnt]#


[root@linux1 mnt]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
149G 122G 25900M 16.97
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 148M 0.19 62653M 48
[root@linux1 mnt]#


25.Check processes on each node

ceph@admin:~$ ps -ef|grep ceph | grep -v grep
root 1942 1628 0 Sep22 pts/0 00:00:00 su - ceph
ceph 1951 1942 0 Sep22 pts/0 00:00:01 -su
ceph 26485 1951 0 15:07 pts/0 00:00:00 ps -ef


ubuntu@ceph01:~$ ps -ef|grep ceph | grep -v grep
avahi 541 1 0 Sep22 ? 00:00:46 avahi-daemon: running [ceph01.local]
ceph 10656 1 0 Sep22 ? 00:05:34 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph
ubuntu@ceph01:~$


ubuntu@ceph02:~$ ps -ef|grep ceph | grep -v grep
avahi 524 1 0 Sep22 ? 00:00:47 avahi-daemon: running [ceph02.local]
ceph 8422 1 0 Sep22 ? 00:05:34 /usr/bin/ceph-osd -f --cluster ceph --id 1 --setuser ceph --setgroup ceph
ubuntu@ceph02:~$

ubuntu@ceph03:~$ ps -ef|grep ceph | grep -v grep
avahi 524 1 0 Sep22 ? 00:00:47 avahi-daemon: running [ceph03.local]
ceph 8436 1 0 Sep22 ? 00:05:40 /usr/bin/ceph-osd -f --cluster ceph --id 2 --setuser ceph --setgroup ceph
ubuntu@ceph03:~$


ubuntu@ceph04:~$ ps -ef|grep ceph | grep -v grep
ceph 7276 1 0 Sep22 ? 00:03:26 /usr/bin/ceph-mon -f --cluster ceph --id ceph04 --setuser ceph --setgroup ceph
ceph 9878 1 0 Sep22 ? 00:05:52 /usr/bin/ceph-osd -f --cluster ceph --id 3 --setuser ceph --setgroup ceph
ubuntu@ceph04:~$


ubuntu@ceph05:~$ ps -ef|grep ceph | grep -v grep
ceph 7479 1 0 Sep22 ? 00:00:54 /usr/bin/ceph-mon -f --cluster ceph --id ceph05 --setuser ceph --setgroup ceph
ceph 11122 1 0 Sep22 ? 00:05:40 /usr/bin/ceph-osd -f --cluster ceph --id 4 --setuser ceph --setgroup ceph
ubuntu@ceph05:~$


26.Check for errors

ubuntu@ceph01:~$ sudo journalctl -f


ubuntu@ceph02:~$ sudo journalctl -f


ubuntu@ceph03:~$ sudo journalctl -f


ubuntu@ceph04:~$ sudo journalctl -f


ubuntu@ceph05:~$ sudo journalctl -f


ubuntu@admin:~$ sudo journalctl -f

27.Refresh all data and configuration

Run this to wipe all data and configuration from every node before a reinstall.
Target node: admin


ceph@admin:~$ ceph-deploy disk zap ceph01:sda1 ceph02:sda1 ceph03:sda1 ceph04:sda1 ceph05:sda1
ceph@admin:~$ ceph-deploy forgetkeys
ceph@admin:~$ ceph-deploy purge ceph01 ceph02 ceph03 ceph04 ceph05 admin
ceph@admin:~$ ceph-deploy purgedata ceph01 ceph02 ceph03 ceph04 ceph05 admin








Target nodes: ceph01 ceph02 ceph03 ceph04 ceph05 admin
ceph@admin:~$ sudo su
# apt-get autoremove -y
# apt-get autoclean -y
# rm -fr /etc/apt/sources.list.d/ceph.list
# deluser --remove-home ceph
# rm -fr /var/lib/ceph
# rm -fr /var/local/osd*
# reboot