minger

minger

Ceph Installation Usage Record

Ceph Installation Record#

I. Preparation Work#

Machine List:

ceph0 192.168.100.40

ceph1 192.168.100.41

ceph2 192.168.100.42

# Configuration needed for all three machines
# Stop and disable the firewall
systemctl stop firewalld 
systemctl disable firewalld 
# Disable SELinux: setenforce only affects the running system, so also
# persist the change in the config file to survive a reboot
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
# Stop NetworkManager so it does not interfere with the static network setup
systemctl stop NetworkManager
systemctl disable NetworkManager 
# Set the hostname (use ceph1 / ceph2 respectively on the other two nodes)
hostnamectl set-hostname ceph0
# Add host entries (sync the file to the other nodes using scp)
vim /etc/hosts
192.168.100.40 ceph0
192.168.100.41 ceph1
192.168.100.42 ceph2
# Configure SSH passwordless login to every node (including this one)
ssh-keygen
ssh-copy-id ceph0
ssh-copy-id ceph1
ssh-copy-id ceph2

Configure yum repository and sync to other nodes

vim /etc/yum.repos.d/ceph.repo

# Ceph Nautilus binary packages for x86_64 (Tsinghua mirror for speed;
# GPG key still verified against the official download.ceph.com key)
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

# Architecture-independent packages (ceph-deploy lives here)
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

# Source RPMs (not required for installation, kept for completeness)
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-nautilus/el7/SRPMS/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

II. Create Cluster#

1. Create Cluster Configuration Directory#

mkdir /etc/ceph

2. Install ceph-deploy Deployment Tool#

yum -y install ceph-deploy

3. Create Ceph Cluster#

# Generate the initial cluster definition for the three monitors
ceph-deploy new ceph0 ceph1 ceph2
# ls 
# Files generated in the current directory:
# ceph.conf            cluster configuration file
# ceph-deploy-ceph.log log of the ceph-deploy run
# ceph.mon.keyring     monitor authentication keyring

4. Modify Configuration File#

[global]
# Unique cluster id generated by "ceph-deploy new"
fsid = 913835ff-cc9b-45d7-a243-f3978d3e0c53
mon_initial_members = ceph0, ceph1, ceph2
mon_host = 192.168.100.40,192.168.100.41,192.168.100.42
# Require cephx authentication between daemons and from clients
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

# Public (client-facing) network segment
public network = 192.168.100.0/24

[mgr]
# Enable the WEB dashboard
# NOTE(review): on Nautilus the dashboard is normally enabled at runtime
# with "ceph mgr module enable dashboard" (done later in this guide);
# confirm this conf key is actually honored
mgr modules = dashboard

5. Install Ceph#

# Install the Ceph packages on all nodes from the admin node
ceph-deploy install ceph0 ceph1 ceph2
# Note: if the network to the upstream repository is poor, install the
# packages manually on each node with the yum command below; otherwise
# "ceph-deploy install" fetches them automatically from the official repo.

yum install ceph ceph-radosgw -y

6. Initialize Monitor#

ceph-deploy mon create-initial

7. Sync Management Information#

Sync the configuration file information to all Ceph cluster nodes

ceph-deploy admin ceph0 ceph1 ceph2

8. Install Mgr#

The management daemon's main role is to share and extend some functions of the monitor, reducing the load on the monitor for better management of the Ceph storage system. The Ceph dashboard graphical management also requires the mgr.

ceph-deploy mgr create ceph0 ceph1 ceph2

9. Install RGW#

ceph-deploy rgw create ceph0 ceph1 ceph2

10. Create MDS Service#

ceph-deploy mds create ceph0 ceph1 ceph2

11. Use zap Command to Clear Disk Information in Preparation for Creating OSD#

# Wipe partition tables / previous data from the OSD device on each node.
# NOTE: the device named here (/dev/vdb) must be the same device passed to
# "osd create --data" in the next step.
ceph-deploy disk zap ceph0 /dev/vdb
ceph-deploy disk zap ceph1 /dev/vdb
ceph-deploy disk zap ceph2 /dev/vdb

12. Create OSD Disk#

# Create one OSD per node. The device must match the one zapped in the
# previous step (/dev/vdb) — the original used /dev/sdb here, which would
# either fail or build OSDs on a different, un-zapped disk.
ceph-deploy osd create --data /dev/vdb ceph0
ceph-deploy osd create --data /dev/vdb ceph1
ceph-deploy osd create --data /dev/vdb ceph2

13. Verify#

ceph -s 
[root@ceph0 ceph]# ceph -s 
  cluster:
    id:     913835ff-cc9b-45d7-a243-f3978d3e0c53
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph0,ceph1,ceph2 (age 18h)
    mgr: ceph0(active, since 17h), standbys: ceph2, ceph1
    mds: cephfs:1 {0=ceph1=up:active} 2 up:standby
    osd: 3 osds: 3 up (since 18h), 3 in (since 18h)
 
  data:
    pools:   2 pools, 192 pgs
    objects: 106 objects, 335 MiB
    usage:   4.0 GiB used, 26 GiB / 30 GiB avail
    pgs:     192 active+clean

III. Enable Dashboard#

1. Enable Dashboard Module#

ceph mgr module enable dashboard

Error:

Error ENOENT: all mgr daemons do not support module 'dashboard', pass --force to force enablement
Solution:

yum -y install ceph-mgr-dashboard
This happens because the ceph-mgr-dashboard package is not installed; install it on every node that runs a mgr daemon (here: all three).

2. Generate Signature#

ceph dashboard create-self-signed-cert

3. Generate Certificate#

openssl req -new -nodes -x509   -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650   -keyout dashboard.key -out dashboard.crt -extensions v3_ca

4. Start Service#

# Restart the dashboard module so it picks up the new configuration
ceph mgr module disable dashboard
 
ceph mgr module enable dashboard

5. Set Access Address and Port#

# Bind the dashboard to the active mgr's address and a fixed port
ceph config set mgr mgr/dashboard/server_addr 192.168.100.40
 
ceph config set mgr mgr/dashboard/server_port 9001

6. Disable HTTPS#

ceph config set mgr mgr/dashboard/ssl false

7. View Daemons#

ceph mgr services

8. Set Management Panel Login Credentials#

# Write the dashboard admin password to a file. Use ">" (overwrite) rather
# than ">>": appending would leave multiple lines in the file on a re-run.
# Restrict permissions since this is a credential.
echo "1234567890" > ceph-password.txt
chmod 600 ceph-password.txt
ceph dashboard set-login-credentials admin -i ceph-password.txt
# Access via http://192.168.100.40:9001/

(Screenshot: Ceph dashboard login page — image-20230721162111692)

IV. Using Ceph#

1. Create Storage Pool#

# Create the data and metadata pools for CephFS (128 / 64 placement groups)
ceph osd pool create cephfs_data 128

ceph osd pool create cephfs_metadata 64

# Do NOT run "rbd pool init" on pools intended for CephFS: it tags the
# pools with the "rbd" application, and "ceph fs new" refuses pools that
# are already claimed by another application. "ceph fs new" (run below)
# tags the pools with the "cephfs" application itself.

2. View Storage Pools#

ceph osd lspools

3. Create File System#

ceph fs new fs_test cephfs_metadata cephfs_data

4. View File System#

ceph fs ls

5. View MDS Status#

ceph mds stat

V. Test Mounting#

# Install the CephFS FUSE client (-y avoids the interactive prompt).
# Note: the mount below uses the kernel client, not ceph-fuse; the package
# is only required for "ceph-fuse"-style mounts, but is harmless to have.
yum -y install ceph-fuse

# Read the admin key out of the keyring; copy the "key = ..." value
cat ceph.client.admin.keyring 

# Store only the secret in a root-readable file. Use plain ASCII double
# quotes — the original curly quotes (“ ”) would be written into the file
# and break authentication — and ">" so re-runs don't append duplicates.
echo "ABCrM7lkDYZ8GxAA7SkHF7gcZIsFMR47LA8bsB==" > /root/admin.key
chmod 600 /root/admin.key

# Mount the filesystem via the kernel client through any monitor address
mount -t ceph ceph2:6789:/ /mnt -o name=admin,secretfile=/root/admin.key
Loading...
Ownership of this post data is guaranteed by blockchain and smart contracts to the creator alone.