Signed-off-by: Adriana Nikelska <adrianax.nikielska@intel.com>
Showing 1 changed file with 266 additions and 0 deletions.
test/functional/tests/io_class/test_io_class_stats_core_cache.py
@@ -0,0 +1,266 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

import re
from itertools import cycle

import pytest

from api.cas import casadm, ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from api.cas.casadm_params import StatsFilter
from api.cas.ioclass_config import IoClass
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils.os_utils import sync, drop_caches, Udev
from test_utils.size import Size, Unit

num_of_caches = 4
cores_per_cache = 3
num_of_cores = num_of_caches * cores_per_cache


@pytest.mark.parametrize("per_core", [False, True])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_io_class_stats_core_cache(per_core):
    """
    title: Open CAS statistics values for IO classes - per core/cache.
    description: Check Open CAS ability to display correct values in statistics
      for all supported IO classes for a given core/cache device.
    pass_criteria:
      - proper statistics after fio
      - statistics do not change after stopping and loading the caches
    """

    with TestRun.step("Prepare devices."):
        cache_device = TestRun.disks['cache']
        cache_device.create_partitions([Size(20, Unit.GibiByte)] * num_of_caches)
        cache_devices = cache_device.partitions

        core_device = TestRun.disks['core']
        core_device.create_partitions([Size(10, Unit.GibiByte)] * num_of_cores)
        core_devices = core_device.partitions

    with TestRun.step("Start caches (one for each supported cache mode) and add core devices."):
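        # One cache is started per cache mode; zip() stops at the shorter sequence, so the
        # number of cache partitions bounds how many modes are exercised.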
        caches = [casadm.start_cache(dev, cache_mode=cache_mode, force=True)
                  for dev, cache_mode in zip(cache_devices, CacheMode)]

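        # Assign cores_per_cache consecutive core partitions to each cache.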
        cores = []
        for i, cache in zip(range(0, num_of_cores, cores_per_cache), caches):
            cores.extend([cache.add_core(dev) for dev in core_devices[i:i+cores_per_cache]])

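        # NOP cleaning keeps cached writes dirty (in write-back) and disabling the sequential
        # cut-off keeps every request handled by the cache, which makes the statistics below
        # deterministic; purge and counter reset give each cache a clean starting point.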
        for cache in caches:
            cache.set_cleaning_policy(CleaningPolicy.nop)
            cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
            cache.purge_cache()
            cache.reset_counters()

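        # Disable udev so that background device scanning does not issue extra I/O
        # which could distort the per-IO-class statistics.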
        Udev.disable()

    with TestRun.step(f"Validate IO class usage statistics after start "
                      f"for each {'core' if per_core else 'cache'}."):
        devices = cores if per_core else caches
        for dev in devices:
            stats = dev.get_statistics_flat(0, [StatsFilter.usage])
            TestRun.LOGGER.info(f"Check stats for cache {dev.cache_id} "
                                f"{f'core {dev.core_id}' if per_core else ''}")
            for name, value in stats.items():
                check_value(name, value.get_value(), 0)

    with TestRun.step("Load IO class configuration file for all caches."):
        io_classes = IoClass.csv_to_list(
            fs_utils.read_file("/etc/opencas/ioclass-config.csv"))
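        # Disable allocation for metadata IO classes so that filesystem metadata does not
        # occupy the cache and skew the per-class usage values checked later.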
        for io_class in io_classes:
            if 'metadata' in io_class.rule:
                io_class.allocation = 0
        IoClass.save_list_to_config_file(io_classes, add_default_rule=False)
        for cache in caches:
            cache.load_io_class(ioclass_config.default_config_file_path)

    with TestRun.step("Make filesystems on Open CAS devices and mount them."):
        for core, fs in zip(cores, cycle(Filesystem)):
            mount_point = core.path.replace('/dev/', '/mnt/')
            core.create_filesystem(fs)
            core.mount(mount_point)
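        # Flush filesystem buffers and drop the page cache so that mkfs/mount metadata I/O
        # settles before the measured fio runs.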
        sync()
        drop_caches()

    with TestRun.step("Run fio for each device and validate IO class usage, "
                      "request and block level statistics values depending on cache mode."):
        saved_stats = []
        sizes = get_sizes(io_classes)
        for io_class, core, file_size in zip(io_classes[2:], cores, sizes):
            cache_id = core.cache_id
            cache = caches[cache_id - 1]
            cache_mode = cache.get_cache_mode()

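            # Zero the counters and purge before each run so that every IO class is
            # measured in isolation.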
            core.reset_counters()
            cache.purge_cache()
            drop_caches()

            with TestRun.step(f"Testing cache {cache_id} core {core.core_id} "
                              f"with IO class {io_class.id}. "
                              f"Cache mode: {cache_mode}"):

                size_in_blocks = round(file_size.get_value(Unit.Blocks4096))

                TestRun.LOGGER.info("Run fio.")
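                # IO class 22 is assumed to be the direct-request class in the default
                # configuration, so fio issues direct I/O only for it.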
                fio = fio_params(core, file_size, direct=(io_class.id == 22))
                fio.run()
                sync()
                drop_caches()

                TestRun.LOGGER.info("Check statistics.")
                dev = core if per_core else cache
                stats = dev.get_statistics_flat(
                    io_class.id, [StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
                stats_perc = dev.get_statistics_flat(io_class.id, [StatsFilter.usage],
                                                     percentage_val=True)

                s = '' if per_core else '(s)'
                stats_pt_wa = [f'writes to exported object{s}', f'total to/from exported object{s}',
                               f'writes to core{s}', f'total to/from core{s}']
                stats_wb = ['occupancy', 'dirty', 'write full misses', 'write total',
                            f'writes to exported object{s}', f'total to/from exported object{s}',
                            'writes to cache', 'total to/from cache']
                stats_wt = ['occupancy', 'clean', 'write full misses', 'write total',
                            f'writes to exported object{s}', f'total to/from exported object{s}',
                            'writes to cache', 'total to/from cache',
                            f'writes to core{s}', f'total to/from core{s}']

                # TODO: need proper values for pass-through reads, pass-through writes,
                # serviced requests, total requests and check correctness of other values

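                # Request counters come back as plain numbers; block and usage counters are
                # Size objects and are converted to 4 KiB blocks before comparison.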
                for name, value in stats.items():
                    value = round(value) if type(value) is float \
                        else round(value.get_value(Unit.Blocks4096))

                    if cache_mode == CacheMode.PT or cache_mode == CacheMode.WA:
                        expected_value = size_in_blocks if name in stats_pt_wa else 0
                        check_value(name, value, expected_value)

                    elif cache_mode == CacheMode.WB:
                        expected_value = size_in_blocks if name in stats_wb else 0
                        check_value(name, value, expected_value)

                    elif cache_mode == CacheMode.WT:
                        expected_value = size_in_blocks if name in stats_wt else 0
                        check_value(name, value, expected_value)

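                # Expected percentages: occupancy is the written size relative to the cache
                # size; with NOP cleaning all cached data stays dirty in write-back (and clean
                # in write-through), hence the 100% expectations below.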
                for name, value in stats_perc.items():
                    if cache_mode == CacheMode.PT:
                        expected_value = 0
                        epsilon_percentage = 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WA:
                        expected_value = 0
                        epsilon_percentage = 0.5 if name == 'occupancy' else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WB:
                        occupancy = 100 * size_in_blocks / cache.size.get_value(Unit.Blocks4096)
                        expected_value = 100 if name == 'dirty' else \
                            occupancy if name == 'occupancy' else 0
                        epsilon_percentage = 0.5 if name in ('dirty', 'occupancy') else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WT:
                        occupancy = 100 * size_in_blocks / cache.size.get_value(Unit.Blocks4096)
                        expected_value = 100 if name == 'clean' else \
                            occupancy if name == 'occupancy' else 0
                        epsilon_percentage = 0.5 if name in ('clean', 'occupancy') else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                saved_stats.append(dev.get_statistics_flat(io_class.id,
                                                           [StatsFilter.conf, StatsFilter.usage]))

    with TestRun.step("Stop and load caches back."):
        for core in cores:
            core.unmount()
        casadm.stop_all_caches()
        caches = [casadm.load_cache(device) for device in cache_devices]

    with TestRun.step(f"Validate IO class statistics per {'core' if per_core else 'cache'} - "
                      f"they shall be the same as before the stop."):
        stats = []
        for io_class, core in zip(io_classes[2:], cores):
            cache_id = core.cache_id
            cache = caches[cache_id - 1]
            dev = core if per_core else cache
            stats.append(dev.get_statistics_flat(io_class.id,
                                                 [StatsFilter.conf, StatsFilter.usage]))

        for saved_stat, stat, core, io_class in zip(saved_stats, stats, cores, io_classes[2:]):
            TestRun.LOGGER.info(f"Testing cache {core.cache_id} core {core.core_id} "
                                f"with IO class {io_class.id}.")
            for name, saved_value, value in zip(stat.keys(), saved_stat.values(), stat.values()):
                value = round(value.get_value(Unit.Blocks4096)) if type(value) is Size else value
                saved_value = round(saved_value.get_value(Unit.Blocks4096)) \
                    if type(saved_value) is Size else saved_value
                check_value(name, value, saved_value)

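    # Occupancy, dirty and clean are additive across cores, so the per-core sums should match
    # the cache-level totals; 'free' describes the whole cache and is therefore excluded.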
    with TestRun.step("Sum all per-core statistics values (except free) and "
                      "compare them with the statistics for the cache."):
        for cache in caches:
            TestRun.LOGGER.info(f"Check stats for cache {cache.cache_id}.")
            occupancy = sum([core.get_statistics().usage_stats.occupancy for core in cores
                             if core.cache_id == cache.cache_id])
            dirty = sum([core.get_statistics().usage_stats.dirty for core in cores
                         if core.cache_id == cache.cache_id])
            clean = sum([core.get_statistics().usage_stats.clean for core in cores
                         if core.cache_id == cache.cache_id])
            cores_stats = [occupancy, dirty, clean]

            cache_occupancy = cache.get_statistics().usage_stats.occupancy
            cache_dirty = cache.get_statistics().usage_stats.dirty
            cache_clean = cache.get_statistics().usage_stats.clean
            cache_stats = [cache_occupancy, cache_dirty, cache_clean]

            for name, cores_sum, cache_stat in zip(
                    ('occupancy', 'dirty', 'clean'), cores_stats, cache_stats):
                check_value(name, cores_sum, cache_stat)


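# Derive one file size per tested IO class from the numeric threshold embedded in its rule.
# The two appended entries are assumed to cover the last two classes: a size just above the
# largest threshold and a single 4 KiB block for the direct-I/O class.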
def get_sizes(io_classes):
    sizes = [Size(int(re.search(r"\d+", io_class.rule).group()), Unit.Byte)
             for io_class in io_classes[2:-2]]
    sizes.extend([sizes[-1] + Size(100, Unit.MebiByte), Size(1, Unit.Blocks4096)])

    return sizes


def check_value(name, actual_value, expected_value):
    if actual_value != expected_value:
        TestRun.LOGGER.error(f"Bad {name} value. "
                             f"Expected: {expected_value}, actual: {actual_value}.")
    else:
        TestRun.LOGGER.info(f"Proper {name} value: {actual_value}.")


def check_perc_value(name, actual_value, expected_value, epsilon_percentage):
    if abs(expected_value - actual_value) > epsilon_percentage:
        TestRun.LOGGER.error(f"Bad {name} percentage value. "
                             f"Expected: {expected_value}, actual: {actual_value}.")
    else:
        TestRun.LOGGER.info(f"Proper {name} percentage value: {actual_value}.")


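# Single-job sequential write of `size` in 4 KiB blocks; the target file name encodes the
# size and the direct flag so that runs with different parameters use distinct files.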
def fio_params(core, size, direct=False):
    name = f"{core.mount_point}/{round(size.get_value())}{'_direct' if direct else ''}"
    fio = Fio().create_command() \
        .io_engine(IoEngine.libaio) \
        .read_write(ReadWrite.write) \
        .io_depth(1) \
        .block_size(Size(1, Unit.Blocks4096)) \
        .num_jobs(1) \
        .direct(direct) \
        .file_size(size) \
        .target(name)

    return fio