Added documentation for Ceph-Dashboard enablement #607

Workflow file for this run

name: Tests
on:
- push
- pull_request
jobs:
build-microceph:
name: Build microceph snap
runs-on: ubuntu-22.04
env:
SNAPCRAFT_BUILD_ENVIRONMENT: "lxd"
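# Build inside an LXD container rather than snapcraft's default Multipass VM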
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
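# fetch-depth: 0 clones the full git history; snapcraft typically needs the tags to derive the snap version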
- name: Clear FORWARD firewall rules
run: tests/scripts/actionutils.sh cleaript
- name: Install dependencies
run: |
tests/scripts/actionutils.sh setup_lxd
sudo snap install snapcraft --classic
snap list
- name: Build snaps
run: snapcraft
- name: Upload snap artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: snaps
path: "*.snap"
retention-days: 5
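# The snap artifact uploaded here is downloaded by the test jobs below (needs: build-microceph)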
single-system-tests:
name: Single node with encryption
runs-on: ubuntu-22.04
needs: build-microceph
steps:
- name: Download snap
uses: actions/download-artifact@v3
with:
name: snaps
path: /home/runner
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install and setup
run: |
tests/scripts/actionutils.sh install_microceph
set -uex
# Verify metadata.yaml
meta=/var/snap/microceph/current/conf/metadata.yaml
cat $meta
grep -q ceph-version $meta
# Verify health and auto crush rule
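# A fresh cluster with no OSDs reports HEALTH_WARN until enough OSDs join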
sudo microceph.ceph health | grep -q "OSD count 0 < osd_pool_default_size 3"
sudo microceph.ceph osd crush rule ls | grep -F microceph_auto_osd
- name: Add OSD with failure
run: |
set -eux
loop_file="$(sudo mktemp -p /mnt XXXX.img)"
sudo truncate -s 1G "${loop_file}"
loop_dev="$(sudo losetup --show -f "${loop_file}")"
minor="${loop_dev##/dev/loop}"
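# Loop devices use block major number 7; create an alias node so the device can be addressed like a regular disk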
sudo mknod -m 0660 "/dev/sdi21" b 7 "${minor}"
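# Disable errexit: the disk add below is expected to fail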
set +e
rc=0
sudo microceph disk add --wipe "/dev/sdi21" --encrypt || rc="$?"
if [[ $rc -eq 0 ]] ; then echo "FDE unexpectedly succeeded without dmcrypt"; exit 1; fi
- name: Add OSDs
run: tests/scripts/actionutils.sh add_encrypted_osds
- name: Enable RGW
run: tests/scripts/actionutils.sh enable_rgw
- name: Run system tests
run: |
set -eux
# Show ceph's status
sudo microceph.ceph status
# Confirm ceph is healthy and services started
sudo microceph.ceph status | grep -F "mon: 1 daemons"
sudo microceph.ceph status | grep -E "mgr: .*active, "
sudo microceph.ceph status | grep -F "osd: 3 osds"
sudo microceph.ceph status | grep -F "rgw: 1 daemon"
# Check health after restart
sudo snap stop microceph
sudo snap start microceph
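# Give the daemons time to come back up before re-checking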
sleep 2m
sudo microceph.ceph status
sudo microceph.ceph status | grep -F "mon: 1 daemons"
sudo microceph.ceph status | grep -E "mgr: .*active, "
sudo microceph.ceph status | grep -F "osd: 3 osds"
sudo microceph.ceph status | grep -F "rgw: 1 daemon"
pgrep ceph-osd || { echo "No ceph-osd process found" ; exit 1; }
- name: Exercise RGW
run: |
set -eux
sudo microceph.ceph status
sudo systemctl status snap.microceph.rgw
sudo microceph.radosgw-admin user create --uid=test --display-name=test
sudo microceph.radosgw-admin key create --uid=test --key-type=s3 --access-key fooAccessKey --secret-key fooSecretKey
sudo apt-get -qq install s3cmd
echo hello-radosgw > ~/test.txt
s3cmd --host localhost --host-bucket="localhost/%(bucket)" --access_key=fooAccessKey --secret_key=fooSecretKey --no-ssl mb s3://testbucket
s3cmd --host localhost --host-bucket="localhost/%(bucket)" --access_key=fooAccessKey --secret_key=fooSecretKey --no-ssl put -P ~/test.txt s3://testbucket
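# 'put -P' applies a public-read ACL, which is what lets the unauthenticated curl below succeed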
curl -s http://localhost/testbucket/test.txt | grep -F hello-radosgw
- name: Test Cluster Config
run: |
set -eux
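# Derive the runner's primary IPv4 address from the default route (requires jq)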
cip=$(ip -4 -j route | jq -r '.[] | select(.dst | contains("default")) | .prefsrc' | tr -d '[:space:]')
# pre config set timestamp for service age
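# ActiveEnterTimestampMonotonic increases each time the unit starts, so comparing before/after values detects a restart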
ts=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
# set config
sudo microceph cluster config set cluster_network $cip/8 --wait
# post config set timestamp for service age
ts2=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
# Check config output
output=$(sudo microceph cluster config get cluster_network | grep -cim1 'cluster_network')
if [[ $output -lt 1 ]] ; then echo "config check failed: $output"; exit 1; fi
# Check service restarted
if [ $ts2 -le $ts ]; then echo "config check failed: service not restarted, TS1: $ts TS2: $ts2"; exit 1; fi
# reset config
sudo microceph cluster config reset cluster_network --wait
# post config reset timestamp for service age
ts3=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
# Check service restarted
if [ $ts3 -le $ts2 ]; then echo "config check failed: service not restarted, TS2: $ts2 TS3: $ts3"; exit 1; fi
multi-node-tests:
name: Multi node testing
runs-on: ubuntu-22.04
needs: build-microceph
steps:
- name: Download snap
uses: actions/download-artifact@v3
with:
name: snaps
path: /home/runner
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Clear FORWARD firewall rules
run: tests/scripts/actionutils.sh cleaript
- name: Free disk
run: tests/scripts/actionutils.sh free_runner_disk
- name: Install dependencies
run: tests/scripts/actionutils.sh setup_lxd
- name: Create containers with loopback devices
run: tests/scripts/actionutils.sh create_containers
- name: Install local microceph snap
run: tests/scripts/actionutils.sh install_multinode
- name: Bootstrap
run: tests/scripts/actionutils.sh bootstrap_head
- name: Setup cluster
run: tests/scripts/actionutils.sh cluster_nodes
- name: Add 2 OSDs
run: |
for c in node-wrk1 node-wrk2 ; do
tests/scripts/actionutils.sh add_osd_to_node $c
done
lxc exec node-head -- sh -c "microceph.ceph -s"
- name: Test failure domain scale up
run: |
set -uex
# We should still have the OSD-level failure domain rule
lxc exec node-head -- sh -c "sudo microceph.ceph osd crush rule ls" | grep -F microceph_auto_osd
# Add a 3rd OSD, should switch to host failure domain
tests/scripts/actionutils.sh add_osd_to_node node-head
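# Poll (up to 8 tries, 2s apart) until the third OSD registers as "in"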
for i in $(seq 1 8); do
res=$( ( lxc exec node-head -- sh -c 'sudo microceph.ceph -s | grep -F osd: | sed -E "s/.* ([[:digit:]]*) in .*/\1/"' ) || true )
if [[ $res -gt 2 ]] ; then
echo "Found >2 OSDs"
break
else
echo -n '.'
sleep 2
fi
done
# Expect exactly one rule, with host failure domain
rules=$( lxc exec node-head -- sh -c "sudo microceph.ceph osd crush rule ls" )
echo "$rules"
echo "$rules" | grep -F microceph_auto_host
# Quote $rules so newlines survive and the line count is meaningful
num=$( echo "$rules" | wc -l )
if [ "$num" != '1' ] ; then echo "Expected exactly one rule, got $num" ; exit 1 ; fi
- name: Test 3 osds present
run: |
set -uex
lxc exec node-head -- sh -c "microceph.ceph -s" | grep -E "osd: 3 osds: 3 up.*3 in"
- name: Test osd host rule
run: |
set -uex
lxc exec node-head -- sh -c "microceph.ceph osd crush rule ls" | grep -F microceph_auto_host
lxc exec node-head -- sh -c "microceph.ceph osd pool ls detail" | grep -F "crush_rule 1"
- name: Test migrate services
run: |
set -uex
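# Move the non-OSD services (mds, mgr, mon) from node-wrk1 to node-wrk3; the OSD stays on node-wrk1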
lxc exec node-head -- sh -c "microceph cluster migrate node-wrk1 node-wrk3"
sleep 2
lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk1 | grep -E "^ Services: osd$"
lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk3 | grep -E "^ Services: mds, mgr, mon$"