mirror of https://git.proxmox.com/git/ceph.git
synced 2025-08-02 14:09:14 +00:00

import 14.2.10 upstream release

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

parent bed944f8c4
commit abc94cda91
@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.5.1)

project(ceph CXX C ASM)
set(VERSION 14.2.9)
set(VERSION 14.2.10)

if(POLICY CMP0028)
  cmake_policy(SET CMP0028 NEW)
@@ -253,7 +253,7 @@ endif()

option(WITH_FUSE "Fuse is here" ON)
if(WITH_FUSE)
  find_package(fuse)
  find_package(FUSE)
  set(HAVE_LIBFUSE ${FUSE_FOUND})
endif()

@@ -445,6 +445,7 @@ option(WITH_RADOSGW_FCGI_FRONTEND "Rados Gateway's FCGI frontend is enabled" OFF
option(WITH_RADOSGW_BEAST_FRONTEND "Rados Gateway's Beast frontend is enabled" ON)
option(WITH_RADOSGW_BEAST_OPENSSL "Rados Gateway's Beast frontend uses OpenSSL" ON)
option(WITH_RADOSGW_AMQP_ENDPOINT "Rados Gateway's pubsub support for AMQP push endpoint" ON)
option(WITH_RADOSGW_KAFKA_ENDPOINT "Rados Gateway's pubsub support for Kafka push endpoint" ON)

if(WITH_RADOSGW)
  find_package(EXPAT REQUIRED)
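Both toggles above are ordinary CMake cache options, so they can be set at configure time. A minimal sketch, assuming an out-of-source build (the directory layout and option values are illustrative, not taken from this commit):

    # Hypothetical configure run: keep FUSE support, disable the new
    # Kafka push endpoint added in this release.
    mkdir -p build && cd build
    cmake -DWITH_FUSE=ON -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF ..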
@@ -1,45 +1,38 @@
14.2.8
14.2.9
------

* The following OSD memory config options related to bluestore cache autotuning can now
  be configured during runtime:
* Bucket notifications now support Kafka endpoints. This requires librdkafka of
  version 0.9.2 and up. Note that Ubuntu 16.04.6 LTS (Xenial Xerus) has an older
  version of librdkafka, and would require an update to the library.

    - osd_memory_base (default: 768 MB)
    - osd_memory_cache_min (default: 128 MB)
    - osd_memory_expected_fragmentation (default: 0.15)
    - osd_memory_target (default: 4 GB)
* The pool parameter ``target_size_ratio``, used by the pg autoscaler,
  has changed meaning. It is now normalized across pools, rather than
  specifying an absolute ratio. For details, see :ref:`pg-autoscaler`.
  If you have set target size ratios on any pools, you may want to set
  these pools to autoscale ``warn`` mode to avoid data movement during
  the upgrade::

  The above options can be set with::
    ceph osd pool set <pool-name> pg_autoscale_mode warn

    ceph config set global <option> <value>
* The behaviour of the ``-o`` argument to the rados tool has been reverted to
  its original behaviour of indicating an output file. This reverts it to a more
  consistent behaviour when compared to other tools. Specifying object size is now
  accomplished by using an upper case O ``-O``.

* The MGR now accepts 'profile rbd' and 'profile rbd-read-only' user caps.
  These caps can be used to provide users access to MGR-based RBD functionality
  such as 'rbd perf image iostat' and 'rbd perf image iotop'.
* The format of MDSs in `ceph fs dump` has changed.

* The configuration value ``osd_calc_pg_upmaps_max_stddev`` used for upmap
  balancing has been removed. Instead use the mgr balancer config
  ``upmap_max_deviation`` which now is an integer number of PGs of deviation
  from the target PGs per OSD. This can be set with a command like
  ``ceph config set mgr mgr/balancer/upmap_max_deviation 2``. The default
  ``upmap_max_deviation`` is 1. There are situations where crush rules
  would not allow a pool to ever have completely balanced PGs. For example, if
  crush requires 1 replica on each of 3 racks, but there are fewer OSDs in 1 of
  the racks. In those cases, the configuration value can be increased.
* Ceph will issue a health warning if a RADOS pool's ``size`` is set to 1
  or in other words the pool is configured with no redundancy. This can
  be fixed by setting the pool size to the minimum recommended value
  with::

* RGW: a mismatch between the bucket notification documentation and the actual
  message format was fixed. This means that any endpoints receiving bucket
  notification, will now receive the same notifications inside a JSON array
  named 'Records'. Note that this does not affect pulling bucket notification
  from a subscription in a 'pubsub' zone, as these are already wrapped inside
  that array.
    ceph osd pool set <pool-name> size <num-replicas>

* Ceph will now issue a health warning if a RADOS pool has a ``pg_num``
  value that is not a power of two. This can be fixed by adjusting
  the pool to a nearby power of two::
  The warning can be silenced with::

    ceph osd pool set <pool-name> pg_num <new-pg-num>
    ceph config set global mon_warn_on_pool_no_redundancy false

  Alternatively, the warning can be silenced with::

    ceph config set global mon_warn_on_pool_pg_num_not_power_of_two false
* RGW: bucket listing performance on sharded bucket indexes has been
  notably improved by heuristically -- and significantly, in many
  cases -- reducing the number of entries requested from each bucket
  index shard.
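The runtime-tunable bluestore cache options listed above all follow the generic ``ceph config set`` pattern shown in the notes. As an illustration only (the value below is an arbitrary example, not a recommendation from this release)::

    # Raise the per-OSD memory target to 6 GiB on a running cluster.
    ceph config set global osd_memory_target 6442450944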
@@ -1,7 +1,7 @@
# Contributor: John Coyle <dx9err@gmail.com>
# Maintainer: John Coyle <dx9err@gmail.com>
pkgname=ceph
pkgver=14.2.9
pkgver=14.2.10
pkgrel=0
pkgdesc="Ceph is a distributed object store and file system"
pkgusers="ceph"
@@ -64,7 +64,7 @@ makedepends="
	xmlstarlet
	yasm
	"
source="ceph-14.2.9.tar.bz2"
source="ceph-14.2.10.tar.bz2"
subpackages="
	$pkgname-base
	$pkgname-common
@@ -117,7 +117,7 @@ _sysconfdir=/etc
_udevrulesdir=/etc/udev/rules.d
_python_sitelib=/usr/lib/python2.7/site-packages

builddir=$srcdir/ceph-14.2.9
builddir=$srcdir/ceph-14.2.10

build() {
	export CEPH_BUILD_VIRTUALENV=$builddir
@@ -397,7 +397,7 @@ libcephfs_dev() {
	pkgdesc="Ceph distributed file system client library headers"
	depends="libcephfs librados-devel"

	_pkg $_includedir/cephfs ceph_statx.h libcephfs.h
	_pkg $_includedir/cephfs ceph_ll_client.h libcephfs.h
	_pkg $_libdir libcephfs.so
}

@@ -397,7 +397,7 @@ libcephfs_dev() {
	pkgdesc="Ceph distributed file system client library headers"
	depends="libcephfs librados-devel"

	_pkg $_includedir/cephfs ceph_statx.h libcephfs.h
	_pkg $_includedir/cephfs ceph_ll_client.h libcephfs.h
	_pkg $_libdir libcephfs.so
}

@@ -30,17 +30,23 @@
%endif
%if 0%{?fedora} || 0%{?rhel}
%bcond_without selinux
%if 0%{?rhel} >= 8
%bcond_with cephfs_java
%else
%bcond_without cephfs_java
%endif
%bcond_without amqp_endpoint
%bcond_without lttng
%bcond_without libradosstriper
%bcond_without ocf
%bcond_without amqp_endpoint
%bcond_without kafka_endpoint
%global _remote_tarball_prefix https://download.ceph.com/tarballs/
%endif
%if 0%{?suse_version}
%bcond_with selinux
%bcond_with cephfs_java
%bcond_with amqp_endpoint
%bcond_with kafka_endpoint
#Compat macro for new _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%global _fillupdir /var/adm/fillup-templates
@@ -76,7 +82,9 @@
%if 0%{without python2}
%global _defined_if_python2_absent 1
%endif

%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
%global weak_deps 1
%endif
%if %{with selinux}
# get selinux policy version
%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
@@ -101,7 +109,7 @@
# main package definition
#################################################################################
Name: ceph
Version: 14.2.9
Version: 14.2.10
Release: 0%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch: 2
@@ -117,7 +125,7 @@ License: LGPL-2.1 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and
Group: System/Filesystems
%endif
URL: http://ceph.com/
Source0: %{?_remote_tarball_prefix}ceph-14.2.9.tar.bz2
Source0: %{?_remote_tarball_prefix}ceph-14.2.10.tar.bz2
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch: x86_64 aarch64 ppc64le s390x
@@ -194,22 +202,18 @@ BuildRequires: yasm
%if 0%{with amqp_endpoint}
BuildRequires: librabbitmq-devel
%endif
%if 0%{with kafka_endpoint}
BuildRequires: librdkafka-devel
%endif
%if 0%{with make_check}
BuildRequires: jq
BuildRequires: libuuid-devel
BuildRequires: python%{_python_buildid}-bcrypt
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-nose
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-requests
BuildRequires: python%{_python_buildid}-six
BuildRequires: python%{_python_buildid}-tox
BuildRequires: python%{_python_buildid}-virtualenv
%if 0%{?rhel} == 7
BuildRequires: pyOpenSSL%{_python_buildid}
%else
BuildRequires: python%{_python_buildid}-pyOpenSSL
%endif
BuildRequires: socat
%endif
%if 0%{with seastar}
@@ -268,7 +272,7 @@ BuildRequires: python2-Cython
%endif
BuildRequires: python%{python3_pkgversion}-devel
BuildRequires: python%{python3_pkgversion}-setuptools
%if 0%{?rhel}
%if 0%{?rhel} == 7
BuildRequires: python%{python3_version_nodots}-Cython
%else
BuildRequires: python%{python3_pkgversion}-Cython
@@ -280,18 +284,31 @@ BuildRequires: lz4-devel >= 1.7
# distro-conditional make check dependencies
%if 0%{with make_check}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-tox
BuildRequires: xmlsec1
%if 0%{?rhel} == 7
BuildRequires: pyOpenSSL%{_python_buildid}
%else
BuildRequires: python%{_python_buildid}-pyOpenSSL
%endif
BuildRequires: python%{_python_buildid}-cherrypy
BuildRequires: python%{_python_buildid}-jwt
BuildRequires: python%{_python_buildid}-routes
BuildRequires: python%{_python_buildid}-scipy
BuildRequires: python%{_python_buildid}-werkzeug
BuildRequires: xmlsec1
%endif
%if 0%{?suse_version}
BuildRequires: python%{_python_buildid}-CherryPy
BuildRequires: python%{_python_buildid}-PyJWT
BuildRequires: python%{_python_buildid}-Routes
BuildRequires: python%{_python_buildid}-Werkzeug
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-numpy-devel
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-pyOpenSSL
BuildRequires: python%{_python_buildid}-tox
BuildRequires: rpm-build
BuildRequires: xmlsec1-devel
%endif
@@ -328,6 +345,9 @@ BuildRequires: libcryptopp-devel
BuildRequires: libnuma-devel
%endif
%endif
%if 0%{?rhel} >= 8
BuildRequires: /usr/bin/pathfix.py
%endif

%description
Ceph is a massively scalable, open-source, distributed storage system that runs
@@ -358,7 +378,6 @@ Requires: grep
Requires: logrotate
Requires: parted
Requires: psmisc
Requires: python%{_python_buildid}-requests
Requires: python%{_python_buildid}-setuptools
Requires: util-linux
Requires: xfsprogs
@@ -370,7 +389,7 @@ Requires: which
Requires: gperftools-libs >= 2.6.1
%endif
%endif
%if 0%{?suse_version}
%if 0%{?weak_deps}
Recommends: chrony
%endif
%description base
@@ -389,7 +408,6 @@ Requires: python%{_python_buildid}-rbd = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-cephfs = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-rgw = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-requests
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{_python_buildid}-prettytable
%endif
@@ -439,6 +457,7 @@ Group: System/Filesystems
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-bcrypt
Requires: python%{_python_buildid}-pecan
Requires: python%{_python_buildid}-requests
Requires: python%{_python_buildid}-six
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{_python_buildid}-cherrypy
@@ -447,13 +466,15 @@ Requires: python%{_python_buildid}-werkzeug
%if 0%{?suse_version}
Requires: python%{_python_buildid}-CherryPy
Requires: python%{_python_buildid}-Werkzeug
Recommends: python%{_python_buildid}-influxdb
%endif
%if 0%{?weak_deps}
Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-ssh = %{_epoch_prefix}%{version}-%{release}
Recommends: python%{_python_buildid}-influxdb
%endif
%if 0%{?rhel} == 7
Requires: pyOpenSSL
@@ -479,6 +500,9 @@ Requires: python%{_python_buildid}-cherrypy
Requires: python%{_python_buildid}-jwt
Requires: python%{_python_buildid}-routes
Requires: python%{_python_buildid}-werkzeug
%if 0%{?weak_deps}
Recommends: python%{_python_buildid}-saml
%endif
%endif
%if 0%{?suse_version}
Requires: python%{_python_buildid}-CherryPy
@@ -878,7 +902,7 @@ Summary: Ceph distributed file system client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Obsoletes: libcephfs1
Obsoletes: libcephfs1 < %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-libcephfs
@@ -1102,7 +1126,7 @@ This package provides Ceph’s default alerts for Prometheus.
# common
#################################################################################
%prep
%autosetup -p1 -n ceph-14.2.9
%autosetup -p1 -n ceph-14.2.10

%build
# LTO can be enabled as soon as the following GCC bug is fixed:
@@ -1216,6 +1240,11 @@ ${CMAKE} .. \
    -DWITH_RADOSGW_AMQP_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_AMQP_ENDPOINT=OFF \
%endif
%if 0%{with kafka_endpoint}
    -DWITH_RADOSGW_KAFKA_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF \
%endif
    -DBOOST_J=$CEPH_SMP_NCPUS \
    -DWITH_GRAFANA=ON
@@ -1264,6 +1293,11 @@ install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
# sudoers.d
install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl

%if 0%{?rhel} >= 8
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
%endif

#set up placeholder directories
mkdir -p %{buildroot}%{_sysconfdir}/ceph
mkdir -p %{buildroot}%{_localstatedir}/run/ceph
@@ -2075,7 +2109,7 @@ fi
%files -n libcephfs-devel
%dir %{_includedir}/cephfs
%{_includedir}/cephfs/libcephfs.h
%{_includedir}/cephfs/ceph_statx.h
%{_includedir}/cephfs/ceph_ll_client.h
%{_libdir}/libcephfs.so

%if 0%{with python2}
@@ -2242,8 +2276,7 @@ if [ $1 -eq 0 ]; then
fi
fi
exit 0

%endif # with selinux
%endif

%if 0%{with python2}
%files -n python-ceph-compat
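The new %bcond_without kafka_endpoint declaration makes Kafka support the default while keeping it switchable from the rpmbuild command line; as a hedged sketch of how such a conditional is normally driven (the SRPM filename below is hypothetical):

    # --without kafka_endpoint flips the %bcond_without default to off,
    # which in turn passes -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF to CMake.
    rpmbuild --without kafka_endpoint --rebuild ceph-14.2.10-0.src.rpm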
@@ -30,17 +30,23 @@
%endif
%if 0%{?fedora} || 0%{?rhel}
%bcond_without selinux
%if 0%{?rhel} >= 8
%bcond_with cephfs_java
%else
%bcond_without cephfs_java
%endif
%bcond_without amqp_endpoint
%bcond_without lttng
%bcond_without libradosstriper
%bcond_without ocf
%bcond_without amqp_endpoint
%bcond_without kafka_endpoint
%global _remote_tarball_prefix https://download.ceph.com/tarballs/
%endif
%if 0%{?suse_version}
%bcond_with selinux
%bcond_with cephfs_java
%bcond_with amqp_endpoint
%bcond_with kafka_endpoint
#Compat macro for new _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%global _fillupdir /var/adm/fillup-templates
@@ -76,7 +82,9 @@
%if 0%{without python2}
%global _defined_if_python2_absent 1
%endif

%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
%global weak_deps 1
%endif
%if %{with selinux}
# get selinux policy version
%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
@@ -194,22 +202,18 @@ BuildRequires: yasm
%if 0%{with amqp_endpoint}
BuildRequires: librabbitmq-devel
%endif
%if 0%{with kafka_endpoint}
BuildRequires: librdkafka-devel
%endif
%if 0%{with make_check}
BuildRequires: jq
BuildRequires: libuuid-devel
BuildRequires: python%{_python_buildid}-bcrypt
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-nose
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-requests
BuildRequires: python%{_python_buildid}-six
BuildRequires: python%{_python_buildid}-tox
BuildRequires: python%{_python_buildid}-virtualenv
%if 0%{?rhel} == 7
BuildRequires: pyOpenSSL%{_python_buildid}
%else
BuildRequires: python%{_python_buildid}-pyOpenSSL
%endif
BuildRequires: socat
%endif
%if 0%{with seastar}
@@ -268,7 +272,7 @@ BuildRequires: python2-Cython
%endif
BuildRequires: python%{python3_pkgversion}-devel
BuildRequires: python%{python3_pkgversion}-setuptools
%if 0%{?rhel}
%if 0%{?rhel} == 7
BuildRequires: python%{python3_version_nodots}-Cython
%else
BuildRequires: python%{python3_pkgversion}-Cython
@@ -280,18 +284,31 @@ BuildRequires: lz4-devel >= 1.7
# distro-conditional make check dependencies
%if 0%{with make_check}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-tox
BuildRequires: xmlsec1
%if 0%{?rhel} == 7
BuildRequires: pyOpenSSL%{_python_buildid}
%else
BuildRequires: python%{_python_buildid}-pyOpenSSL
%endif
BuildRequires: python%{_python_buildid}-cherrypy
BuildRequires: python%{_python_buildid}-jwt
BuildRequires: python%{_python_buildid}-routes
BuildRequires: python%{_python_buildid}-scipy
BuildRequires: python%{_python_buildid}-werkzeug
BuildRequires: xmlsec1
%endif
%if 0%{?suse_version}
BuildRequires: python%{_python_buildid}-CherryPy
BuildRequires: python%{_python_buildid}-PyJWT
BuildRequires: python%{_python_buildid}-Routes
BuildRequires: python%{_python_buildid}-Werkzeug
BuildRequires: python%{_python_buildid}-coverage
BuildRequires: python%{_python_buildid}-numpy-devel
BuildRequires: python%{_python_buildid}-pecan
BuildRequires: python%{_python_buildid}-pyOpenSSL
BuildRequires: python%{_python_buildid}-tox
BuildRequires: rpm-build
BuildRequires: xmlsec1-devel
%endif
@@ -328,6 +345,9 @@ BuildRequires: libcryptopp-devel
BuildRequires: libnuma-devel
%endif
%endif
%if 0%{?rhel} >= 8
BuildRequires: /usr/bin/pathfix.py
%endif

%description
Ceph is a massively scalable, open-source, distributed storage system that runs
@@ -358,7 +378,6 @@ Requires: grep
Requires: logrotate
Requires: parted
Requires: psmisc
Requires: python%{_python_buildid}-requests
Requires: python%{_python_buildid}-setuptools
Requires: util-linux
Requires: xfsprogs
@@ -370,7 +389,7 @@ Requires: which
Requires: gperftools-libs >= 2.6.1
%endif
%endif
%if 0%{?suse_version}
%if 0%{?weak_deps}
Recommends: chrony
%endif
%description base
@@ -389,7 +408,6 @@ Requires: python%{_python_buildid}-rbd = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-cephfs = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-rgw = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-requests
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{_python_buildid}-prettytable
%endif
@@ -439,6 +457,7 @@ Group: System/Filesystems
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: python%{_python_buildid}-bcrypt
Requires: python%{_python_buildid}-pecan
Requires: python%{_python_buildid}-requests
Requires: python%{_python_buildid}-six
%if 0%{?fedora} || 0%{?rhel}
Requires: python%{_python_buildid}-cherrypy
@@ -447,13 +466,15 @@ Requires: python%{_python_buildid}-werkzeug
%if 0%{?suse_version}
Requires: python%{_python_buildid}-CherryPy
Requires: python%{_python_buildid}-Werkzeug
Recommends: python%{_python_buildid}-influxdb
%endif
%if 0%{?weak_deps}
Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
Recommends: ceph-mgr-ssh = %{_epoch_prefix}%{version}-%{release}
Recommends: python%{_python_buildid}-influxdb
%endif
%if 0%{?rhel} == 7
Requires: pyOpenSSL
@@ -479,6 +500,9 @@ Requires: python%{_python_buildid}-cherrypy
Requires: python%{_python_buildid}-jwt
Requires: python%{_python_buildid}-routes
Requires: python%{_python_buildid}-werkzeug
%if 0%{?weak_deps}
Recommends: python%{_python_buildid}-saml
%endif
%endif
%if 0%{?suse_version}
Requires: python%{_python_buildid}-CherryPy
@@ -878,7 +902,7 @@ Summary: Ceph distributed file system client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Obsoletes: libcephfs1
Obsoletes: libcephfs1 < %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-libcephfs
@@ -1216,6 +1240,11 @@ ${CMAKE} .. \
    -DWITH_RADOSGW_AMQP_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_AMQP_ENDPOINT=OFF \
%endif
%if 0%{with kafka_endpoint}
    -DWITH_RADOSGW_KAFKA_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF \
%endif
    -DBOOST_J=$CEPH_SMP_NCPUS \
    -DWITH_GRAFANA=ON
@@ -1264,6 +1293,11 @@ install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
# sudoers.d
install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl

%if 0%{?rhel} >= 8
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
%endif

#set up placeholder directories
mkdir -p %{buildroot}%{_sysconfdir}/ceph
mkdir -p %{buildroot}%{_localstatedir}/run/ceph
@@ -2075,7 +2109,7 @@ fi
%files -n libcephfs-devel
%dir %{_includedir}/cephfs
%{_includedir}/cephfs/libcephfs.h
%{_includedir}/cephfs/ceph_statx.h
%{_includedir}/cephfs/ceph_ll_client.h
%{_libdir}/libcephfs.so

%if 0%{with python2}
@@ -2242,8 +2276,7 @@ if [ $1 -eq 0 ]; then
fi
fi
exit 0

%endif # with selinux
%endif

%if 0%{with python2}
%files -n python-ceph-compat
@@ -1,7 +1,13 @@
ceph (14.2.9-1xenial) xenial; urgency=medium
ceph (14.2.10-1xenial) xenial; urgency=medium


 -- Jenkins Build Slave User <jenkins-build@braggi12.front.sepia.ceph.com>  Thu, 09 Apr 2020 16:27:54 +0000
 -- Jenkins Build Slave User <jenkins-build@braggi11.front.sepia.ceph.com>  Thu, 25 Jun 2020 18:20:02 +0000

ceph (14.2.10-1) stable; urgency=medium

  * New upstream release

 -- Ceph Release Team <ceph-maintainers@ceph.com>  Thu, 25 Jun 2020 17:32:29 +0000

ceph (14.2.9-1) stable; urgency=medium

@@ -9,7 +9,7 @@ function(build_fio)
    DOWNLOAD_DIR ${CMAKE_BINARY_DIR}/src/
    UPDATE_COMMAND "" # this disables rebuild on each run
    GIT_REPOSITORY "https://github.com/axboe/fio.git"
    GIT_TAG "fio-3.10"
    GIT_TAG "fio-3.15"
    SOURCE_DIR ${CMAKE_BINARY_DIR}/src/fio
    BUILD_IN_SOURCE 1
    CONFIGURE_COMMAND <SOURCE_DIR>/configure
@@ -10,18 +10,29 @@ function(check_cxx_atomics var)
  check_cxx_source_compiles("
#include <atomic>
#include <cstdint>

#if __s390x__
// Boost needs 16-byte atomics for tagged pointers.
// These are implemented via inline instructions on the platform
// if 16-byte alignment can be proven, and are delegated to libatomic
// library routines otherwise. Whether or not alignment is provably
// OK for a std::atomic unfortunately depends on compiler version and
// optimization levels, and also on the details of the expression.
// We specifically test access via an otherwise unknown pointer here
// to ensure we get the most complex case. If this access can be
// done without libatomic, then all accesses can be done.
bool atomic16(std::atomic<unsigned __int128> *ptr)
{
  return *ptr != 0;
}
#endif

int main() {
  std::atomic<uint8_t> w1;
  std::atomic<uint16_t> w2;
  std::atomic<uint32_t> w4;
  std::atomic<uint64_t> w8;
#ifdef __s390x__
  // Boost needs 16-byte atomics for tagged pointers.
  std::atomic<unsigned __int128> w16;
#else
#define w16 0
#endif
  return w1 + w2 + w4 + w8 + w16;
  return w1 + w2 + w4 + w8;
}
" ${var})
endfunction(check_cxx_atomics)
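Callers of a probe like check_cxx_atomics() typically fall back to linking libatomic when the plain compile fails. A minimal sketch of that pattern, assuming the module and variable names below (they are illustrative, not taken from this diff):

    include(CheckCxxAtomic)
    check_cxx_atomics(HAVE_CXX11_ATOMIC)
    if(NOT HAVE_CXX11_ATOMIC)
      # Retry the same probe with libatomic and link it when that succeeds.
      set(CMAKE_REQUIRED_LIBRARIES atomic)
      check_cxx_atomics(HAVE_LIBATOMIC_CXX11_ATOMIC)
      if(HAVE_LIBATOMIC_CXX11_ATOMIC)
        link_libraries(atomic)
      endif()
    endif()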
ceph/cmake/modules/FindFUSE.cmake (new file, 58 lines)
@@ -0,0 +1,58 @@
# This module can find FUSE Library
#
# The following variables will be defined for your use:
# - FUSE_FOUND : was FUSE found?
# - FUSE_INCLUDE_DIRS : FUSE include directory
# - FUSE_LIBRARIES : FUSE library
# - FUSE_VERSION : the version of the FUSE library found

if(PACKAGE_FIND_VERSION AND PACKAGE_FIND_VERSION VERSION_LESS "3.0")
  set(fuse_names fuse)
  set(fuse_suffixes fuse)
else()
  set(fuse_names fuse3 fuse)
  set(fuse_suffixes fuse3 fuse)
endif()

if(APPLE)
  list(APPEND fuse_names libosxfuse.dylib)
  list(APPEND fuse_suffixes osxfuse)
endif()

find_path(
  FUSE_INCLUDE_DIR
  NAMES fuse_common.h fuse_lowlevel.h fuse.h
  PATH_SUFFIXES ${fuse_suffixes})

find_library(FUSE_LIBRARIES
  NAMES ${fuse_names}
  PATHS /usr/local/lib64 /usr/local/lib)

foreach(ver "MAJOR" "MINOR")
  file(STRINGS "${FUSE_INCLUDE_DIR}/fuse_common.h" fuse_ver_${ver}_line
    REGEX "^#define[\t ]+FUSE_${ver}_VERSION[\t ]+[0-9]+$")
  string(REGEX REPLACE ".*#define[\t ]+FUSE_${ver}_VERSION[\t ]+([0-9]+)$"
    "\\1" FUSE_VERSION_${ver} "${fuse_ver_${ver}_line}")
endforeach()
set(FUSE_VERSION
  "${FUSE_VERSION_MAJOR}.${FUSE_VERSION_MINOR}")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(FUSE
  REQUIRED_VARS FUSE_LIBRARIES FUSE_INCLUDE_DIR
  VERSION_VAR FUSE_VERSION)

mark_as_advanced(
  FUSE_INCLUDE_DIR)

if(FUSE_FOUND)
  set(FUSE_INCLUDE_DIRS ${FUSE_INCLUDE_DIR})
  if(NOT TARGET FUSE::FUSE)
    add_library(FUSE::FUSE UNKNOWN IMPORTED)
    set_target_properties(FUSE::FUSE PROPERTIES
      INTERFACE_INCLUDE_DIRECTORIES "${FUSE_INCLUDE_DIRS}"
      IMPORTED_LINK_INTERFACE_LANGUAGES "C"
      IMPORTED_LOCATION "${FUSE_LIBRARIES}"
      VERSION "${FUSE_VERSION}")
  endif()
endif()
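The module follows modern find-module conventions, so consumers can request a version and link the imported target. A minimal usage sketch (the executable target is hypothetical):

    # Ask for libfuse 3.x; requesting a version below 3.0 makes the module
    # search for the legacy fuse 2.x names instead.
    find_package(FUSE 3.0 REQUIRED)
    add_executable(fuse_demo main.c)
    target_link_libraries(fuse_demo PRIVATE FUSE::FUSE)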
ceph/cmake/modules/FindRDKafka.cmake (new file, 33 lines)
@@ -0,0 +1,33 @@
find_package(PkgConfig QUIET)

pkg_search_module(PC_rdkafka
  rdkafka)

find_path(rdkafka_INCLUDE_DIR
  NAMES librdkafka/rdkafka.h
  PATHS ${PC_rdkafka_INCLUDE_DIRS})

find_library(rdkafka_LIBRARY
  NAMES rdkafka
  PATHS ${PC_rdkafka_LIBRARY_DIRS})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(RDKafka
  REQUIRED_VARS rdkafka_INCLUDE_DIR rdkafka_LIBRARY
  VERSION_VAR PC_rdkafka_VERSION)

if(RDKafka_FOUND)
  set(RDKafka_VERSION ${PC_rdkafka_VERSION})
  string(REPLACE "." ";" version_list ${PC_rdkafka_VERSION})
  list(GET version_list 0 RDKafka_VERSION_MAJOR)
  list(GET version_list 1 RDKafka_VERSION_MINOR)
  list(GET version_list 2 RDKafka_VERSION_PATCH)

  if(NOT TARGET RDKafka::RDKafka)
    add_library(RDKafka::RDKafka UNKNOWN IMPORTED)
    set_target_properties(RDKafka::RDKafka PROPERTIES
      INTERFACE_INCLUDE_DIRECTORIES "${rdkafka_INCLUDE_DIR}"
      IMPORTED_LINK_INTERFACE_LANGUAGES "C"
      IMPORTED_LOCATION "${rdkafka_LIBRARY}")
  endif()
endif()
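Usage mirrors the FUSE module above; a hedged sketch (the target being linked is made up for illustration):

    # Require at least the librdkafka version named in the release notes.
    find_package(RDKafka 0.9.2 REQUIRED)
    target_link_libraries(rgw_kafka_demo PRIVATE RDKafka::RDKafka)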
@@ -6,13 +6,13 @@ if(HAVE_JEMALLOC)
  message(WARNING "JeMalloc does not work well with sanitizers")
endif()

set(Sanitizers_OPTIONS)
set(Sanitizers_COMPILE_OPTIONS)

foreach(component ${Sanitizers_FIND_COMPONENTS})
  if(component STREQUAL "address")
    set(Sanitizers_address_COMPILE_OPTIONS "address")
    set(Sanitizers_address_COMPILE_OPTIONS "-fsanitize=address")
  elseif(component STREQUAL "leak")
    set(Sanitizers_leak_COMPILE_OPTIONS "leak")
    set(Sanitizers_leak_COMPILE_OPTIONS "-fsanitize=leak")
  elseif(component STREQUAL "thread")
    if ("address" IN_LIST ${Sanitizers_FIND_COMPONENTS} OR
        "leak" IN_LIST ${Sanitizers_FIND_COMPONENTS})
@@ -20,13 +20,14 @@ foreach(component ${Sanitizers_FIND_COMPONENTS})
    elseif(NOT CMAKE_POSITION_INDEPENDENT_CODE)
      message(SEND_ERROR "TSan requires all code to be position independent")
    endif()
    set(Sanitizers_Thread_COMPILE_OPTIONS "thread")
    set(Sanitizers_thread_COMPILE_OPTIONS "-fsanitize=thread")
  elseif(component STREQUAL "undefined_behavior")
    set(Sanitizers_undefined_behavior_COMPILE_OPTIONS "undefined")
    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88684
    set(Sanitizers_undefined_behavior_COMPILE_OPTIONS "-fsanitize=undefined;-fno-sanitize=vptr")
  else()
    message(SEND_ERROR "Unsupported sanitizer: ${component}")
  endif()
  list(APPEND Sanitizers_OPTIONS "${Sanitizers_${component}_COMPILE_OPTIONS}")
  list(APPEND Sanitizers_COMPILE_OPTIONS "${Sanitizers_${component}_COMPILE_OPTIONS}")
endforeach()

if(Sanitizers_address_COMPILE_OPTIONS OR Sanitizers_leak_COMPILE_OPTIONS)
@@ -38,12 +39,9 @@ if(Sanitizers_address_COMPILE_OPTIONS OR Sanitizers_leak_COMPILE_OPTIONS)
    libasan.so.3)
endif()

if(Sanitizers_OPTIONS)
  string(REPLACE ";" ","
    Sanitizers_COMPILE_OPTIONS
    "${Sanitizers_OPTIONS}")
  set(Sanitizers_COMPILE_OPTIONS
    "-fsanitize=${Sanitizers_COMPILE_OPTIONS} -fno-omit-frame-pointer")
if(Sanitizers_COMPILE_OPTIONS)
  list(APPEND Sanitizers_COMPILE_OPTIONS
    "-fno-omit-frame-pointer")
endif()

include(CheckCXXSourceCompiles)
@@ -55,24 +53,24 @@ check_cxx_source_compiles("int main() {}"
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Sanitizers
  REQUIRED_VARS
    Sanitizers_ARE_SUPPORTED
    Sanitizers_COMPILE_OPTIONS)
    Sanitizers_COMPILE_OPTIONS
    Sanitizers_ARE_SUPPORTED)

if(Sanitizers_FOUND)
  if(NOT TARGET Sanitizers::Sanitizers)
    add_library(Sanitizers::Sanitizers INTERFACE IMPORTED)
    set_target_properties(Sanitizers::Sanitizers PROPERTIES
      INTERFACE_COMPILE_OPTIONS ${Sanitizers_COMPILE_OPTIONS}
      INTERFACE_LINK_LIBRARIES ${Sanitizers_COMPILE_OPTIONS})
      INTERFACE_COMPILE_OPTIONS "${Sanitizers_COMPILE_OPTIONS}"
      INTERFACE_LINK_LIBRARIES "${Sanitizers_COMPILE_OPTIONS}")
  endif()
  foreach(component ${Sanitizers_FIND_COMPONENTS})
    if(NOT TARGET Sanitizers::${component})
      set(target Sanitizers::${component})
      set(compile_option "-fsanitize=${Sanitizers_${component}_COMPILE_OPTIONS}")
      set(compile_option "${Sanitizers_${component}_COMPILE_OPTIONS}")
      add_library(${target} INTERFACE IMPORTED)
      set_target_properties(${target} PROPERTIES
        INTERFACE_COMPILE_OPTIONS ${compile_option}
        INTERFACE_LINK_LIBRARIES ${compile_option})
        INTERFACE_COMPILE_OPTIONS "${compile_option}"
        INTERFACE_LINK_LIBRARIES "${compile_option}")
    endif()
  endforeach()
endif()
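With the per-component flags now carried on imported interface targets, a consumer can enable individual sanitizers per target. A minimal sketch (the test target name is hypothetical):

    # ASan plus UBSan for one target; the Sanitizers::<component> targets
    # carry both compile and link flags via their interface properties.
    find_package(Sanitizers COMPONENTS address undefined_behavior)
    target_link_libraries(unittest_demo PRIVATE
      Sanitizers::address Sanitizers::undefined_behavior)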
@@ -1,28 +0,0 @@
# This module can find FUSE Library
#
# The following variables will be defined for your use:
# - FUSE_FOUND : was FUSE found?
# - FUSE_INCLUDE_DIRS : FUSE include directory
# - FUSE_LIBRARIES : FUSE library

find_path(
  FUSE_INCLUDE_DIRS
  NAMES fuse_common.h fuse_lowlevel.h fuse.h
  PATHS /usr/local/include/osxfuse /usr/local/include
  PATH_SUFFIXES fuse)

set(fuse_names fuse)
if(APPLE)
  list(APPEND fuse_names libosxfuse.dylib)
endif()

find_library(FUSE_LIBRARIES
  NAMES ${fuse_names}
  PATHS /usr/local/lib64 /usr/local/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(fuse DEFAULT_MSG
  FUSE_INCLUDE_DIRS FUSE_LIBRARIES)

mark_as_advanced(
  FUSE_INCLUDE_DIRS FUSE_LIBRARIES)
@@ -50,6 +50,7 @@ Build-Depends: cmake (>= 3.5),
               libnl-genl-3-dev,
               libxml2-dev,
               librabbitmq-dev,
               librdkafka-dev,
               lsb-release,
               parted,
               patch,
@@ -183,11 +184,12 @@ Description: debugging symbols for ceph-mds
Package: ceph-mgr
Architecture: linux-any
Depends: ceph-base (= ${binary:Version}),
         python-bcrypt,
         python-cherrypy3,
         python-jwt,
         python-openssl,
         python-pecan,
         python-bcrypt,
         python-requests,
         python-werkzeug,
         ${misc:Depends},
         ${python:Depends},
@@ -511,7 +513,6 @@ Depends: librbd1 (= ${binary:Version}),
         python-prettytable,
         python-rados (= ${binary:Version}),
         python-rbd (= ${binary:Version}),
         python-requests,
         python-rgw (= ${binary:Version}),
         ${misc:Depends},
         ${python:Depends},
@@ -1,3 +1,3 @@
usr/include/cephfs/ceph_statx.h
usr/include/cephfs/ceph_ll_client.h
usr/include/cephfs/libcephfs.h
usr/lib/libcephfs.so
@@ -7,34 +7,42 @@ if test -e build; then
fi

PYBUILD="2"
source /etc/os-release
case "$ID" in
    fedora)
        if [ "$VERSION_ID" -ge "29" ] ; then
            PYBUILD="3"
        fi
        ;;
    rhel|centos)
        MAJOR_VER=$(echo "$VERSION_ID" | sed -e 's/\..*$//')
        if [ "$MAJOR_VER" -ge "8" ] ; then
            PYBUILD="3"
        fi
        ;;
    opensuse*|suse|sles)
        PYBUILD="3"
        WITH_RADOSGW_AMQP_ENDPOINT="OFF"
        ;;
esac
if [ -r /etc/os-release ]; then
  source /etc/os-release
  case "$ID" in
      fedora)
          if [ "$VERSION_ID" -ge "29" ] ; then
              PYBUILD="3"
          fi
          ;;
      rhel|centos)
          MAJOR_VER=$(echo "$VERSION_ID" | sed -e 's/\..*$//')
          if [ "$MAJOR_VER" -ge "8" ] ; then
              PYBUILD="3"
          fi
          ;;
      opensuse*|suse|sles)
          PYBUILD="3"
          ARGS+=" -DWITH_RADOSGW_AMQP_ENDPOINT=OFF"
          ARGS+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF"
          ;;
  esac
elif [ "$(uname)" == FreeBSD ] ; then
  PYBUILD="3"
  ARGS+=" -DWITH_RADOSGW_AMQP_ENDPOINT=OFF"
  ARGS+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF"
else
  echo Unknown release
  exit 1
fi

if [ "$PYBUILD" = "3" ] ; then
    ARGS="$ARGS -DWITH_PYTHON2=OFF -DWITH_PYTHON3=ON -DMGR_PYTHON_VERSION=3"
    ARGS+=" -DWITH_PYTHON2=OFF -DWITH_PYTHON3=ON -DMGR_PYTHON_VERSION=3"
fi

if type ccache > /dev/null 2>&1 ; then
    echo "enabling ccache"
    ARGS="$ARGS -DWITH_CCACHE=ON"
fi
if [ -n "$WITH_RADOSGW_AMQP_ENDPOINT" ] ; then
    ARGS="$ARGS -DWITH_RADOSGW_AMQP_ENDPOINT=$WITH_RADOSGW_AMQP_ENDPOINT"
    ARGS+=" -DWITH_CCACHE=ON"
fi

mkdir build
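Because the script now appends to ARGS with += instead of overwriting it, any flags already present in the environment are preserved. A hedged usage sketch (the script name is inferred from context, not shown in this hunk):

    # Pre-seed extra CMake flags before running the configure script.
    ARGS="-DWITH_RADOSGW_KAFKA_ENDPOINT=OFF" ./do_cmake.sh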
@@ -36,6 +36,7 @@ fi
    -D CMAKE_C_FLAGS_DEBUG="$C_FLAGS_DEBUG" \
    -D ENABLE_GIT_VERSION=OFF \
    -D WITH_RADOSGW_AMQP_ENDPOINT=OFF \
    -D WITH_RADOSGW_KAFKA_ENDPOINT=OFF \
    -D WITH_SYSTEM_BOOST=ON \
    -D WITH_SYSTEM_NPM=ON \
    -D WITH_LTTNG=OFF \
@@ -6,6 +6,9 @@ CephFS Administrative commands
Filesystems
-----------

.. note:: The names of the file systems, metadata pools, and data pools can
          only have characters in the set [a-zA-Z0-9\_-.].

These commands operate on the CephFS filesystems in your Ceph cluster.
Note that by default only one filesystem is permitted: to enable
creation of multiple filesystems use ``ceph fs flag set enable_multiple true``.
@@ -35,6 +35,8 @@ Generally, the metadata pool will have at most a few gigabytes of data. For
this reason, a smaller PG count is usually recommended. 64 or 128 is commonly
used in practice for large clusters.

.. note:: The names of the file systems, metadata pools, and data pools can
          only have characters in the set [a-zA-Z0-9\_-.].

Creating a filesystem
=====================
@@ -20,10 +20,10 @@ Layout fields
-------------

pool
    String, giving ID or name. Which RADOS pool a file's data objects will be stored in.
    String, giving ID or name. String can only have characters in the set [a-zA-Z0-9\_-.]. Which RADOS pool a file's data objects will be stored in.

pool_namespace
    String. Within the data pool, which RADOS namespace the objects will
    String with only characters in the set [a-zA-Z0-9\_-.]. Within the data pool, which RADOS namespace the objects will
    be written to. Empty by default (i.e. default namespace).

stripe_unit
@@ -79,15 +79,18 @@ Remove a subvolume group using::

    $ ceph fs subvolumegroup rm <vol_name> <group_name> [--force]

The removal of a subvolume group fails if it is not empty, e.g., has subvolumes
or snapshots, or is non-existent. Using the '--force' flag allows the command
to succeed even if the subvolume group is non-existent.
The removal of a subvolume group fails if it is not empty or non-existent.
The '--force' flag allows the non-existent subvolume group remove command to succeed.


Fetch the absolute path of a subvolume group using::

    $ ceph fs subvolumegroup getpath <vol_name> <group_name>

List subvolume groups using::

    $ ceph fs subvolumegroup ls <vol_name>

Create a snapshot (see :doc:`/cephfs/experimental-features`) of a
subvolume group using::

@@ -102,25 +105,30 @@ Remove a snapshot of a subvolume group using::
Using the '--force' flag allows the command to succeed that would otherwise
fail if the snapshot did not exist.

List snapshots of a subvolume group using::

    $ ceph fs subvolumegroup snapshot ls <vol_name> <group_name>


FS Subvolumes
-------------

Create a subvolume using::

    $ ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes> --group_name <subvol_group_name> --pool_layout <data_pool_name> --uid <uid> --gid <gid> --mode <octal_mode>]
    $ ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes> --group_name <subvol_group_name> --pool_layout <data_pool_name> --uid <uid> --gid <gid> --mode <octal_mode> --namespace-isolated]


The command succeeds even if the subvolume already exists.

When creating a subvolume you can specify its subvolume group, data pool layout,
uid, gid, file mode in octal numerals, and size in bytes. The size of the subvolume is
specified by setting a quota on it (see :doc:`/cephfs/quota`). By default a
subvolume is created within the default subvolume group, and with an octal file
specified by setting a quota on it (see :doc:`/cephfs/quota`). The subvolume can be
created in a separate RADOS namespace by specifying the --namespace-isolated option. By
default a subvolume is created within the default subvolume group, and with an octal file
mode '755', uid of its subvolume group, gid of its subvolume group, data pool layout of
its parent directory and no size limit.

Remove a subvolume group using::
Remove a subvolume using::

    $ ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name> --force]

@@ -130,8 +138,7 @@ First, it moves the subvolume to a trash folder, and then asynchronously purges
its contents.

The removal of a subvolume fails if it has snapshots, or is non-existent.
Using the '--force' flag allows the command to succeed even if the subvolume is
non-existent.
The '--force' flag allows the non-existent subvolume remove command to succeed.

Resize a subvolume using::

@@ -146,6 +153,31 @@ Fetch the absolute path of a subvolume using::

    $ ceph fs subvolume getpath <vol_name> <subvol_name> [--group_name <subvol_group_name>]

Fetch the metadata of a subvolume using::

    $ ceph fs subvolume info <vol_name> <subvol_name> [--group_name <subvol_group_name>]

The output format is JSON and contains the following fields.

* atime: access time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* mtime: modification time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* ctime: change time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* uid: uid of subvolume path
* gid: gid of subvolume path
* mode: mode of subvolume path
* mon_addrs: list of monitor addresses
* bytes_pcent: quota used in percentage if quota is set, else displays "undefined"
* bytes_quota: quota size in bytes if quota is set, else displays "infinite"
* bytes_used: current used size of the subvolume in bytes
* created_at: time of creation of subvolume in the format "YYYY-MM-DD HH:MM:SS"
* data_pool: data pool the subvolume belongs to
* path: absolute path of a subvolume
* type: subvolume type indicating whether it's clone or subvolume
* pool_namespace: RADOS namespace of the subvolume
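As a hedged illustration of those fields only, output for a freshly created subvolume might look like the following (every value below is invented for the example)::

    $ ceph fs subvolume info cephfs subvol1
    {
        "atime": "2020-06-25 17:32:29",
        "bytes_pcent": "undefined",
        "bytes_quota": "infinite",
        "bytes_used": 0,
        "created_at": "2020-06-25 17:32:29",
        "ctime": "2020-06-25 17:32:29",
        "data_pool": "cephfs_data",
        "gid": 0,
        "mode": 16877,
        "mon_addrs": ["192.168.1.1:6789"],
        "mtime": "2020-06-25 17:32:29",
        "path": "/volumes/_nogroup/subvol1",
        "pool_namespace": "",
        "type": "subvolume",
        "uid": 0
    }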
List subvolumes using::

    $ ceph fs subvolume ls <vol_name> [--group_name <subvol_group_name>]

Create a snapshot of a subvolume using::

@@ -159,5 +191,118 @@ Remove a snapshot of a subvolume using::
Using the '--force' flag allows the command to succeed that would otherwise
fail if the snapshot did not exist.

List snapshots of a subvolume using::

    $ ceph fs subvolume snapshot ls <vol_name> <subvol_name> [--group_name <subvol_group_name>]

Cloning Snapshots
-----------------

Subvolumes can be created by cloning subvolume snapshots. Cloning is an asynchronous operation involving copying
data from a snapshot to a subvolume. Due to this bulk copy nature, cloning is currently inefficient for very large
data sets.

Before starting a clone operation, the snapshot should be protected. Protecting a snapshot ensures that the snapshot
cannot be deleted when a clone operation is in progress. Snapshots can be protected using::

    $ ceph fs subvolume snapshot protect <vol_name> <subvol_name> <snap_name> [--group_name <subvol_group_name>]

To initiate a clone operation use::

    $ ceph fs subvolume snapshot clone <vol_name> <subvol_name> <snap_name> <target_subvol_name>

If a snapshot (source subvolume) is a part of non-default group, the group name needs to be specified as per::

    $ ceph fs subvolume snapshot clone <vol_name> <subvol_name> <snap_name> <target_subvol_name> --group_name <subvol_group_name>

Cloned subvolumes can be a part of a different group than the source snapshot (by default, cloned subvolumes are created in default group). To clone to a particular group use::

    $ ceph fs subvolume snapshot clone <vol_name> <subvol_name> <snap_name> <target_subvol_name> --target_group_name <subvol_group_name>

Similar to specifying a pool layout when creating a subvolume, pool layout can be specified when creating a cloned subvolume. To create a cloned subvolume with a specific pool layout use::

    $ ceph fs subvolume snapshot clone <vol_name> <subvol_name> <snap_name> <target_subvol_name> --pool_layout <pool_layout>

To check the status of a clone operation use::

    $ ceph fs clone status <vol_name> <clone_name> [--group_name <group_name>]

A clone can be in one of the following states:

#. `pending` : Clone operation has not started
#. `in-progress` : Clone operation is in progress
#. `complete` : Clone operation has successfully finished
#. `failed` : Clone operation has failed

Sample output from an `in-progress` clone operation::

    $ ceph fs subvolume snapshot protect cephfs subvol1 snap1
    $ ceph fs subvolume snapshot clone cephfs subvol1 snap1 clone1
    $ ceph fs clone status cephfs clone1
    {
      "status": {
        "state": "in-progress",
        "source": {
          "volume": "cephfs",
          "subvolume": "subvol1",
          "snapshot": "snap1"
        }
      }
    }

(NOTE: since `subvol1` is in default group, `source` section in `clone status` does not include group name)

.. note:: Cloned subvolumes are accessible only after the clone operation has successfully completed.

For a successful clone operation, `clone status` would look like so::

    $ ceph fs clone status cephfs clone1
    {
      "status": {
        "state": "complete"
      }
    }

or `failed` state when the clone is unsuccessful.

On failure of a clone operation, the partial clone needs to be deleted and the clone operation needs to be retriggered.
To delete a partial clone use::

    $ ceph fs subvolume rm <vol_name> <clone_name> [--group_name <group_name>] --force

When no clone operations are in progress or scheduled, the snapshot can be unprotected. To unprotect a snapshot use::

    $ ceph fs subvolume snapshot unprotect <vol_name> <subvol_name> <snap_name> [--group_name <subvol_group_name>]

Note that unprotecting a snapshot would fail if there are pending or in-progress clone operations. Also note that
only unprotected snapshots can be removed. This guarantees that a snapshot cannot be deleted when clones are pending
(or in progress).

.. note:: Cloning only synchronizes directories, regular files and symbolic links. Also, inode timestamps (access and
          modification times) are synchronized up to seconds granularity.

An `in-progress` or a `pending` clone operation can be canceled. To cancel a clone operation use the `clone cancel` command::

    $ ceph fs clone cancel <vol_name> <clone_name> [--group_name <group_name>]

On successful cancellation, the cloned subvolume is moved to the `canceled` state::

    $ ceph fs subvolume snapshot protect cephfs subvol1 snap1
    $ ceph fs subvolume snapshot clone cephfs subvol1 snap1 clone1
    $ ceph fs clone cancel cephfs clone1
    $ ceph fs clone status cephfs clone1
    {
      "status": {
        "state": "canceled",
        "source": {
          "volume": "cephfs",
          "subvolume": "subvol1",
          "snapshot": "snap1"
        }
      }
    }

.. note:: The canceled clone can be deleted by using the --force option in the `fs subvolume rm` command.

.. _manila: https://github.com/openstack/manila
.. _CSI: https://github.com/ceph/ceph-csi
@@ -176,21 +176,7 @@
          we initiate trimming. Set to ``-1`` to disable limits.

:Type: 32-bit Integer
:Default: ``30``


``mds log max expiring``

:Description: The maximum number of segments to expire in parallels
:Type: 32-bit Integer
:Default: ``20``


``mds log eopen size``

:Description: The maximum number of inodes in an EOpen event.
:Type: 32-bit Integer
:Default: ``100``
:Default: ``128``


``mds bal sample interval``
@@ -70,8 +70,20 @@ initiate the scrub.
    }

`status` shows the number of inodes that are scheduled to be scrubbed at any point in time,
hence, can change on subsequent `scrub status` invocations.
hence, can change on subsequent `scrub status` invocations. Also, a high level summary of
the scrub operation (which includes the operation state and paths on which scrub is triggered)
gets displayed in `ceph status`.

::

    ceph status
    [...]

    task status:
      scrub status:
          mds.0: active [paths:/]

    [...]

Control (ongoing) Filesystem Scrubs
===================================
@@ -71,6 +71,10 @@ Options
   Available for stat, stat2, get, put, append, truncate, rm, ls
   and all xattr related operations

.. option:: -O object_size

   Set the object size for put/get ops and for write benchmarking


Global commands
===============
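Combined with the release-note change that freed ``-o`` for output files, a hedged example of the new flag (the pool name is invented)::

    # Run a 60-second write benchmark using 8 MiB objects.
    rados -p testpool bench 60 write -O 8388608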
@@ -73,6 +73,10 @@ aspects of your Ceph cluster:
* **Performance counters**: Display detailed service-specific statistics for
  each running service.
* **Monitors**: List all MONs, their quorum status, open sessions.
* **Monitoring**: Enables creation, re-creation, editing and expiration of
  Prometheus' silences, lists the alerting configuration of Prometheus and
  currently firing alerts. Also shows notifications for firing alerts. Needs
  configuration.
* **Configuration Editor**: Display all available configuration options,
  their description, type and default values and edit the current values.
* **Pools**: List all Ceph pools and their details (e.g. applications,
@@ -534,9 +538,9 @@ in order to manage silences.

#. Use the API of Prometheus and the Alertmanager

   This allows you to manage alerts and silences. You will see all alerts and silences
   the Alertmanager currently knows of in the corresponding listing.
   Both can be found in the *Cluster* submenu.
   This allows you to manage alerts and silences. This will enable the "Active
   Alerts", "All Alerts" as well as the "Silences" tabs in the "Monitoring"
   section of the "Cluster" menu entry.

   Alerts can be sorted by name, job, severity, state and start time.
   Unfortunately it's not possible to know when an alert
@@ -563,8 +567,11 @@ in order to manage silences.

      $ ceph dashboard set-alertmanager-api-host 'http://localhost:9093'

   To be able to show what a silence will match beforehand, you have to add the host
   and port of the Prometheus server::
   To be able to see all configured alerts, you will need to configure the URL
   to the Prometheus API. Using this API, the UI will also help you in verifying
   that a new silence will match a corresponding alert.

   ::

      $ ceph dashboard set-prometheus-api-host <prometheus-host:port>  # default: ''

@@ -572,7 +579,7 @@ in order to manage silences.

      $ ceph dashboard set-prometheus-api-host 'http://localhost:9090'

   After setting up the hosts, you have to refresh your the dashboard in your browser window.
   After setting up the hosts, you have to refresh the dashboard in your browser window.

#. Use both methods

@@ -31,6 +31,8 @@ configurable with ``ceph config-key set``, with keys
``mgr/prometheus/server_addr`` and ``mgr/prometheus/server_port``.
This port is registered with Prometheus's `registry <https://github.com/prometheus/prometheus/wiki/Default-port-allocations>`_.

.. _prometheus-rbd-io-statistics:

RBD IO statistics
-----------------

@@ -41,6 +43,10 @@ configuration parameter. The parameter is a comma or space separated list
of ``pool[/namespace]`` entries. If the namespace is not specified the
statistics are collected for all namespaces in the pool.

Example to activate the RBD-enabled pools ``pool1``, ``pool2`` and ``poolN``::

    ceph config set mgr mgr/prometheus/rbd_stats_pools "pool1,pool2,poolN"

The module makes the list of all available images scanning the specified
pools and namespaces and refreshes it periodically. The period is
configurable via the ``mgr/prometheus/rbd_stats_pools_refresh_interval``
@@ -48,6 +54,10 @@ parameter (in sec) and is 300 sec (5 minutes) by default. The module will
force refresh earlier if it detects statistics from a previously unknown
RBD image.

Example to turn up the sync interval to 10 minutes::

    ceph config set mgr mgr/prometheus/rbd_stats_pools_refresh_interval 600

Statistic names and labels
==========================
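The same parameter also accepts namespace-qualified entries, per the ``pool[/namespace]`` syntax described above; an illustrative example (pool and namespace names are invented)::

    ceph config set mgr mgr/prometheus/rbd_stats_pools "pool1/ns1 pool2"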
@ -31,14 +31,19 @@ the per-channel setting has no effect.)

- operating system (OS distribution, kernel version)
- stack trace identifying where in the Ceph code the crash occurred

* **ident** (default: on): User-provided identifying information about
* **device** (default: on): Information about device metrics, including

  - anonymized SMART metrics

* **ident** (default: off): User-provided identifying information about
  the cluster

  - cluster description
  - contact email address

The data being reported does *not* contain any sensitive
data like pool names, object names, object contents, or hostnames.
data like pool names, object names, object contents, hostnames, or device
serial numbers.

It contains counters and statistics on how the cluster has been
deployed, the version of Ceph, the distribution of the hosts and other

@ -63,6 +68,15 @@ You can look at what data is reported at any time with the command::

    ceph telemetry show

To protect your privacy, device reports are generated separately, and data such
as hostname and device serial number is anonymized. The device telemetry is
sent to a different endpoint and does not associate the device data with a
particular cluster. To see a preview of the device report use the command::

    ceph telemetry show-device

Please note: In order to generate the device report we use Smartmontools
version 7.0 and up, which supports JSON output.
If you have any concerns about privacy with regard to the information included in
this report, please contact the Ceph developers.

@ -74,15 +88,22 @@ Individual channels can be enabled or disabled with::

    ceph config set mgr mgr/telemetry/channel_ident false
    ceph config set mgr mgr/telemetry/channel_basic false
    ceph config set mgr mgr/telemetry/channel_crash false
    ceph config set mgr mgr/telemetry/channel_device false
    ceph telemetry show
    ceph telemetry show-device

Enabling Telemetry
------------------

To allow the *telemetry* module to start sharing data,::
To allow the *telemetry* module to start sharing data::

    ceph telemetry on

Please note: Telemetry data is licensed under the Community Data License
Agreement - Sharing - Version 1.0 (https://cdla.io/sharing-1-0/). Hence,
the telemetry module can be enabled only after you add '--license sharing-1-0' to
the 'ceph telemetry on' command.
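
That is, following the licensing note above, the full command is::

    ceph telemetry on --license sharing-1-0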

Telemetry can be disabled at any time with::

    ceph telemetry off
@ -95,6 +116,27 @@ You can adjust this interval with::

    ceph config set mgr mgr/telemetry/interval 72    # report every three days

Status
------

To see the current configuration::

    ceph telemetry status

Sending telemetry through a proxy
---------------------------------

If the cluster cannot directly connect to the configured telemetry
endpoint (default *telemetry.ceph.com*), you can configure an HTTP/HTTPS
proxy server with::

    ceph config set mgr mgr/telemetry/proxy https://10.0.0.1:8080

You can also include a *user:pass* if needed::

    ceph config set mgr mgr/telemetry/proxy https://ceph:telemetry@10.0.0.1:8080

Contact and Description
-----------------------

@ -104,4 +146,3 @@ completely optional, and disabled by default.::

    ceph config set mgr mgr/telemetry/contact 'John Doe <john.doe@example.com>'
    ceph config set mgr mgr/telemetry/description 'My first Ceph cluster'
    ceph config set mgr mgr/telemetry/channel_ident true

@ -412,6 +412,14 @@ by setting it in the ``[mon]`` section of the configuration file.

:Default: ``0``

``mon warn on pool no redundancy``

:Description: Issue a ``HEALTH_WARN`` in cluster log if any pool is
              configured with no replicas.
:Type: Boolean
:Default: ``True``
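
For example, the warning can be muted cluster-wide with (a sketch using the
standard ``ceph config`` syntax)::

    ceph config set mon mon_warn_on_pool_no_redundancy false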

``mon cache target full warn ratio``

:Description: Position between pool's ``cache_target_full`` and

@ -729,21 +729,6 @@ recommended amount with::

Please refer to :ref:`choosing-number-of-placement-groups` and
:ref:`pg-autoscaler` for more information.

POOL_TARGET_SIZE_RATIO_OVERCOMMITTED
____________________________________

One or more pools have a ``target_size_ratio`` property set to
estimate the expected size of the pool as a fraction of total storage,
but the value(s) exceed the total available storage (either by
themselves or in combination with other pools' actual usage).

This is usually an indication that the ``target_size_ratio`` value for
the pool is too large and should be reduced or set to zero with::

    ceph osd pool set <pool-name> target_size_ratio 0

For more information, see :ref:`specifying_pool_target_size`.

POOL_TARGET_SIZE_BYTES_OVERCOMMITTED
____________________________________

@ -759,6 +744,21 @@ the pool is too large and should be reduced or set to zero with::

For more information, see :ref:`specifying_pool_target_size`.

POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO
____________________________________

One or more pools have both ``target_size_bytes`` and
``target_size_ratio`` set to estimate the expected size of the pool.
Only one of these properties should be non-zero. If both are set,
``target_size_ratio`` takes precedence and ``target_size_bytes`` is
ignored.

To reset ``target_size_bytes`` to zero::

    ceph osd pool set <pool-name> target_size_bytes 0

For more information, see :ref:`specifying_pool_target_size`.

TOO_FEW_OSDS
____________

@ -29,7 +29,7 @@ For example to enable autoscaling on pool ``foo``,::

You can also configure the default ``pg_autoscale_mode`` that is
applied to any pools that are created in the future with::

    ceph config set global osd_pool_default_autoscale_mode <mode>
    ceph config set global osd_pool_default_pg_autoscale_mode <mode>

Viewing PG scaling recommendations
----------------------------------
@ -41,10 +41,10 @@ the PG count with this command::

Output will be something like::

    POOL    SIZE  TARGET SIZE  RATE  RAW CAPACITY   RATIO  TARGET RATIO  PG_NUM  NEW PG_NUM  AUTOSCALE
    a     12900M               3.0         82431M  0.4695                     8         128  warn
    c         0                3.0         82431M  0.0000        0.2000      1          64  warn
    b         0       953.6M   3.0         82431M  0.0347                    8              warn
    POOL    SIZE  TARGET SIZE  RATE  RAW CAPACITY   RATIO  TARGET RATIO  EFFECTIVE RATIO  PG_NUM  NEW PG_NUM  AUTOSCALE
    a     12900M               3.0         82431M  0.4695                                     8         128  warn
    c         0                3.0         82431M  0.0000        0.2000           0.9884      1          64  warn
    b         0       953.6M   3.0         82431M  0.0347                                     8              warn

**SIZE** is the amount of data stored in the pool. **TARGET SIZE**, if
present, is the amount of data the administrator has specified that
@ -62,11 +62,21 @@ pools') data. **RATIO** is the ratio of that total capacity that

this pool is consuming (i.e., ratio = size * rate / raw capacity).

**TARGET RATIO**, if present, is the ratio of storage that the
administrator has specified that they expect this pool to consume.
The system uses the larger of the actual ratio and the target ratio
for its calculation. If both target size bytes and ratio are specified, the
administrator has specified that they expect this pool to consume
relative to other pools with target ratios set.
If both target size bytes and ratio are specified, the
ratio takes precedence.

**EFFECTIVE RATIO** is the target ratio after adjusting in two ways:

1. subtracting any capacity expected to be used by pools with target size set
2. normalizing the target ratios among pools with target ratio set so
   they collectively target the rest of the space. For example, 4
   pools with target_ratio 1.0 would have an effective ratio of 0.25.
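
As a worked illustration (hypothetical pools and values), with no target
sizes set, each pool's effective ratio is its target ratio divided by the sum
of the target ratios of all pools that have one::

    target ratios:     a=1.0   b=1.0   c=2.0     (sum = 4.0)
    effective ratios:  a=0.25  b=0.25  c=0.50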

The system uses the larger of the actual ratio and the effective ratio
for its calculation.

**PG_NUM** is the current number of PGs for the pool (or the current
number of PGs that the pool is working towards, if a ``pg_num``
change is in progress). **NEW PG_NUM**, if present, is what the
@ -119,9 +129,9 @@ PGs can be used from the beginning, preventing subsequent changes in

``pg_num`` and the overhead associated with moving data around when
those adjustments are made.

The *target size** of a pool can be specified in two ways: either in
terms of the absolute size of the pool (i.e., bytes), or as a ratio of
the total cluster capacity.
The *target size* of a pool can be specified in two ways: either in
terms of the absolute size of the pool (i.e., bytes), or as a weight
relative to other pools with a ``target_size_ratio`` set.

For example,::

@ -130,18 +140,23 @@ For example,::

will tell the system that `mypool` is expected to consume 100 TiB of
space. Alternatively,::

    ceph osd pool set mypool target_size_ratio .9
    ceph osd pool set mypool target_size_ratio 1.0

will tell the system that `mypool` is expected to consume 90% of the
total cluster capacity.
will tell the system that `mypool` is expected to consume 1.0 relative
to the other pools with ``target_size_ratio`` set. If `mypool` is the
only pool in the cluster, this means an expected use of 100% of the
total capacity. If there is a second pool with ``target_size_ratio``
1.0, both pools would expect to use 50% of the cluster capacity.

You can also set the target size of a pool at creation time with the optional ``--target-size-bytes <bytes>`` or ``--target-size-ratio <ratio>`` arguments to the ``ceph osd pool create`` command.
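
For instance, a sketch of setting a target ratio at creation time (the pool
name and PG count are illustrative)::

    ceph osd pool create mypool 8 --target-size-ratio 1.0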

Note that if impossible target size values are specified (for example,
a capacity larger than the total cluster, or ratio(s) that sum to more
than 1.0) then a health warning
(``POOL_TARET_SIZE_RATIO_OVERCOMMITTED`` or
``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.
a capacity larger than the total cluster) then a health warning
(``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.

If both ``target_size_ratio`` and ``target_size_bytes`` are specified
for a pool, only the ratio will be considered, and a health warning
(``POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO``) will be issued.

Specifying bounds on a pool's PGs
---------------------------------

@ -21,7 +21,7 @@ For example, one may use s3cmd to set or delete a policy thus::

    "Version": "2012-10-17",
    "Statement": [{
      "Effect": "Allow",
      "Principal": {"AWS": ["arn:aws:iam::usfolks:user/fred"]},
      "Principal": {"AWS": ["arn:aws:iam::usfolks:user/fred:subuser"]},
      "Action": "s3:PutObjectAcl",
      "Resource": [
        "arn:aws:s3:::happybucket/*"

@ -73,6 +73,15 @@ Options

:Type: Integer (0 or 1)
:Default: 0

``max_connection_backlog``

:Description: Optional value to define the maximum size for the queue of
              connections waiting to be accepted. If not configured, the value
              from ``boost::asio::socket_base::max_connections`` will be used.

:Type: Integer
:Default: None
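
A hypothetical ``ceph.conf`` frontend line using this option might look like
the following (the port and backlog values are illustrative, not defaults)::

    rgw frontends = beast port=8080 max_connection_backlog=1024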


Civetweb
========

@ -48,6 +48,13 @@ For a v3 version of the OpenStack Identity API you should replace

    rgw keystone admin domain = {keystone admin domain name}
    rgw keystone admin project = {keystone admin project name}

For compatibility with previous versions of ceph, it is also
possible to set ``rgw keystone implicit tenants`` to either
``s3`` or ``swift``. This has the effect of splitting
the identity space such that the indicated protocol will
only use implicit tenants, and the other protocol will
never use implicit tenants. Some older versions of ceph
only supported implicit tenants with swift.
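
For example, to keep the old behaviour of implicit tenants for Swift only,
one would set in ``ceph.conf`` (a minimal sketch)::

    rgw keystone implicit tenants = swift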

Prior to Kilo
-------------

@ -154,6 +154,13 @@ are two or more different tenants all creating a container named

``foo``, radosgw is able to transparently discern them by their tenant
prefix.

It is also possible to limit the effects of implicit tenants
to only apply to swift or s3, by setting ``rgw keystone implicit tenants``
to either ``s3`` or ``swift``. This will likely primarily
be of use to users who had previously used implicit tenants
with older versions of ceph, where implicit tenants
only applied to the swift protocol.

Notes and known issues
----------------------

@ -7,7 +7,7 @@ Bucket Notifications

.. contents::

Bucket notifications provide a mechanism for sending information out of the radosgw when certain events are happening on the bucket.
Currently, notifications could be sent to HTTP and AMQP0.9.1 endpoints.
Currently, notifications could be sent to: HTTP, AMQP0.9.1 and Kafka endpoints.

Note, that if the events should be stored in Ceph, in addition, or instead of being pushed to an endpoint,
the `PubSub Module`_ should be used instead of the bucket notification mechanism.
@ -17,8 +17,8 @@ user can only manage its own topics, and can only associate them with buckets it

In order to send notifications for events for a specific bucket, a notification entity needs to be created. A
notification can be created on a subset of event types, or for all event types (default).
The notification may also filter out events based on preffix/suffix and/or regular expression matching of the keys. As well as,
on the metadata attributes attached to the object.
The notification may also filter out events based on prefix/suffix and/or regular expression matching of the keys, as well as
on the metadata attributes attached to the object, or the object tags.
There can be multiple notifications for any specific topic, and the same topic could be used for multiple notifications.

REST API has been defined to provide configuration and control interfaces for the bucket notification
@ -67,24 +67,49 @@ To update a topic, use the same command used for topic creation, with the topic

    &Name=<topic-name>
    &push-endpoint=<endpoint>
    [&Attributes.entry.1.key=amqp-exchange&Attributes.entry.1.value=<exchange>]
    [&Attributes.entry.2.key=amqp-sck-level&Attributes.entry.2.value=ack-level]
    &Attributes.entry.3.key=verify-sll&Attributes.entry.3.value=true|false]
    [&Attributes.entry.2.key=amqp-ack-level&Attributes.entry.2.value=none|broker]
    [&Attributes.entry.3.key=verify-ssl&Attributes.entry.3.value=true|false]
    [&Attributes.entry.4.key=kafka-ack-level&Attributes.entry.4.value=none|broker]
    [&Attributes.entry.5.key=use-ssl&Attributes.entry.5.value=true|false]
    [&Attributes.entry.6.key=ca-location&Attributes.entry.6.value=<file path>]
    [&Attributes.entry.7.key=OpaqueData&Attributes.entry.7.value=<opaque data>]

Request parameters:

- push-endpoint: URI of endpoint to send push notification to
- push-endpoint: URI of an endpoint to send push notification to
- OpaqueData: opaque data is set in the topic configuration and added to all notifications triggered by the topic

  - URI schema is: ``http[s]|amqp://[<user>:<password>@]<fqdn>[:<port>][/<amqp-vhost>]``
  - Same schema is used for HTTP and AMQP endpoints (except amqp-vhost which is specific to AMQP)
  - Default values for HTTP/S: no user/password, port 80/443
  - Default values for AMQP: user/password=guest/guest, port 5672, amqp-vhost is "/"
- HTTP endpoint

  - verify-ssl: can be used with https endpoints (ignored for other endpoints), indicate whether the server certificate is validated or not ("true" by default)
  - amqp-exchange: mandatory parameter for AMQP endpoint. The exchanges must exist and be able to route messages based on topics
  - amqp-ack-level: No end2end acking is required, as messages may persist in the broker before delivered into their final destination. 2 ack methods exist:
  - URI: ``http[s]://<fqdn>[:<port>]``
  - port defaults to: 80/443 for HTTP/S accordingly
  - verify-ssl: indicate whether the server certificate is validated by the client or not ("true" by default)

  - "none" - message is considered "delivered" if sent to broker
  - "broker" message is considered "delivered" if acked by broker
- AMQP0.9.1 endpoint

  - URI: ``amqp://[<user>:<password>@]<fqdn>[:<port>][/<vhost>]``
  - user/password defaults to: guest/guest
  - user/password may only be provided over HTTPS. Topic creation request will be rejected if not
  - port defaults to: 5672
  - vhost defaults to: "/"
  - amqp-exchange: the exchanges must exist and be able to route messages based on topics (mandatory parameter for AMQP0.9.1)
  - amqp-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

- Kafka endpoint

  - URI: ``kafka://[<user>:<password>@]<fqdn>[:<port>]``
  - if ``use-ssl`` is set to "true", secure connection will be used for connecting with the broker ("false" by default)
  - if ``ca-location`` is provided, and secure connection is used, the specified CA will be used, instead of the default one, to authenticate the broker
  - user/password may only be provided over HTTPS. Topic creation request will be rejected if not
  - user/password may only be provided together with ``use-ssl``, connection to the broker would fail if not
  - port defaults to: 9092
  - kafka-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

.. note::

@ -136,6 +161,7 @@ Response will have the following format:

            <EndpointTopic></EndpointTopic>
        </EndPoint>
        <TopicArn></TopicArn>
        <OpaqueData></OpaqueData>
    </Topic>
    </GetTopicResult>
    <ResponseMetadata>
@ -146,6 +172,7 @@ Response will have the following format:

- User: name of the user that created the topic
- Name: name of the topic
- EndpointAddress: the push-endpoint URL
- if endpoint URL contains user/password information, request must be made over HTTPS. Topic get request will be rejected if not
- EndPointArgs: the push-endpoint args
- EndpointTopic: the topic name that should be sent to the endpoint (may be different from the above topic name)
- TopicArn: topic ARN
@ -196,6 +223,7 @@ Response will have the following format:

            <EndpointTopic></EndpointTopic>
        </EndPoint>
        <TopicArn></TopicArn>
        <OpaqueData></OpaqueData>
    </member>
    </Topics>
    </ListTopicsResult>
@ -204,6 +232,8 @@ Response will have the following format:

    </ResponseMetadata>
    </ListTopicsResponse>

- if endpoint URL contains user/password information, in any of the topics, request must be made over HTTPS. Topic list request will be rejected if not

Notifications
~~~~~~~~~~~~~

@ -258,10 +288,12 @@ pushed or pulled using the pubsub sync module.

    "eTag":"",
    "versionId":"",
    "sequencer": "",
    "metadata":[]
    "metadata":[],
    "tags":[]
    }
    },
    "eventId":"",
    "opaqueData":"",
    }
    ]}

@ -283,7 +315,9 @@ pushed or pulled using the pubsub sync module.

- s3.object.version: object version in case of versioned bucket
- s3.object.sequencer: monotonically increasing identifier of the change per object (hexadecimal format)
- s3.object.metadata: any metadata set on the object sent as: ``x-amz-meta-`` (an extension to the S3 notification API)
- s3.object.tags: any tags set on the object (an extension to the S3 notification API)
- s3.eventId: unique ID of the event, that could be used for acking (an extension to the S3 notification API)
- s3.opaqueData: opaque data is set in the topic configuration and added to all notifications triggered by the topic (an extension to the S3 notification API)

.. _PubSub Module : ../pubsub-module
.. _S3 Notification Compatibility: ../s3-notification-compatibility

@ -11,8 +11,8 @@ events. Events are published into predefined topics. Topics can be subscribed to

can be pulled from them. Events need to be acked. Also, events will expire and disappear
after a period of time.

A push notification mechanism exists too, currently supporting HTTP and
AMQP0.9.1 endpoints, on top of storing the events in Ceph. If events should only be pushed to an endpoint
A push notification mechanism exists too, currently supporting HTTP,
AMQP0.9.1 and Kafka endpoints. In this case, the events are pushed to an endpoint on top of storing them in Ceph. If events should only be pushed to an endpoint
and do not need to be stored in Ceph, the `Bucket Notification`_ mechanism should be used instead of pubsub sync module.

A user can create different topics. A topic entity is defined by its user and its name. A
@ -149,23 +149,46 @@ To update a topic, use the same command used for topic creation, with the topic

::

    PUT /topics/<topic-name>[?push-endpoint=<endpoint>[&amqp-exchange=<exchange>][&amqp-ack-level=<level>][&verify-ssl=true|false]]
    PUT /topics/<topic-name>[?OpaqueData=<opaque data>][&push-endpoint=<endpoint>[&amqp-exchange=<exchange>][&amqp-ack-level=none|broker][&verify-ssl=true|false][&kafka-ack-level=none|broker][&use-ssl=true|false][&ca-location=<file path>]]

Request parameters:

- push-endpoint: URI of endpoint to send push notification to
- push-endpoint: URI of an endpoint to send push notification to
- OpaqueData: opaque data is set in the topic configuration and added to all notifications triggered by the topic

  - URI schema is: ``http[s]|amqp://[<user>:<password>@]<fqdn>[:<port>][/<amqp-vhost>]``
  - Same schema is used for HTTP and AMQP endpoints (except amqp-vhost which is specific to AMQP)
  - Default values for HTTP/S: no user/password, port 80/443
  - Default values for AMQP: user/password=guest/guest, port 5672, amqp-vhost is "/"
The endpoint URI may include parameters depending on the type of endpoint:

  - verify-ssl: can be used with https endpoints (ignored for other endpoints), indicate whether the server certificate is validated or not ("true" by default)
  - amqp-exchange: mandatory parameter for AMQP endpoint. The exchanges must exist and be able to route messages based on topics
  - amqp-ack-level: No end2end acking is required, as messages may persist in the broker before delivered into their final destination. 2 ack methods exist:
- HTTP endpoint

  - "none" - message is considered "delivered" if sent to broker
  - "broker" message is considered "delivered" if acked by broker
  - URI: ``http[s]://<fqdn>[:<port>]``
  - port defaults to: 80/443 for HTTP/S accordingly
  - verify-ssl: indicate whether the server certificate is validated by the client or not ("true" by default)

- AMQP0.9.1 endpoint

  - URI: ``amqp://[<user>:<password>@]<fqdn>[:<port>][/<vhost>]``
  - user/password defaults to: guest/guest
  - user/password may only be provided over HTTPS. Topic creation request will be rejected if not
  - port defaults to: 5672
  - vhost defaults to: "/"
  - amqp-exchange: the exchanges must exist and be able to route messages based on topics (mandatory parameter for AMQP0.9.1)
  - amqp-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

- Kafka endpoint

  - URI: ``kafka://[<user>:<password>@]<fqdn>[:<port>]``
  - if ``use-ssl`` is set to "true", secure connection will be used for connecting with the broker ("false" by default)
  - if ``ca-location`` is provided, and secure connection is used, the specified CA will be used, instead of the default one, to authenticate the broker
  - user/password may only be provided over HTTPS. Topic creation request will be rejected if not
  - user/password may only be provided together with ``use-ssl``, connection to the broker would fail if not
  - port defaults to: 9092
  - kafka-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

The topic ARN in the response will have the following format:

@ -194,9 +217,11 @@ Response will have the following format (JSON):

    "bucket_name":"",
    "oid_prefix":"",
    "push_endpoint":"",
    "push_endpoint_args":""
    "push_endpoint_args":"",
    "push_endpoint_topic":""
    },
    "arn":""
    "opaqueData":""
    },
    "subs":[]
    }
@ -206,7 +231,9 @@ Response will have the following format (JSON):

- dest.bucket_name: not used
- dest.oid_prefix: not used
- dest.push_endpoint: in case of S3-compliant notifications, this value will be used as the push-endpoint URL
- if push-endpoint URL contains user/password information, request must be made over HTTPS. Topic get request will be rejected if not
- dest.push_endpoint_args: in case of S3-compliant notifications, this value will be used as the push-endpoint args
- dest.push_endpoint_topic: in case of S3-compliant notifications, this value will hold the topic name as sent to the endpoint (may be different than the internal topic name)
- topic.arn: topic ARN
- subs: list of subscriptions associated with this topic

@ -228,6 +255,8 @@ List all topics that user defined.

    GET /topics

- if push-endpoint URL contains user/password information, in any of the topics, request must be made over HTTPS. Topic list request will be rejected if not

S3-Compliant Notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -242,6 +271,7 @@ Detailed under: `Bucket Operations`_.

  the associated subscription will not be deleted automatically (any events of the deleted bucket could still be accessed),
  and will have to be deleted explicitly with the subscription deletion API
- Filtering based on metadata (which is an extension to S3) is not supported, and such rules will be ignored
- Filtering based on tags (which is an extension to S3) is not supported, and such rules will be ignored


Non S3-Compliant Notifications
@ -298,7 +328,8 @@ Response will have the following format (JSON):

    "bucket_name":"",
    "oid_prefix":"",
    "push_endpoint":"",
    "push_endpoint_args":""
    "push_endpoint_args":"",
    "push_endpoint_topic":""
    }
    "arn":""
    },
@ -316,24 +347,45 @@ Creates a new subscription.

::

    PUT /subscriptions/<sub-name>?topic=<topic-name>[&push-endpoint=<endpoint>[&amqp-exchange=<exchange>][&amqp-ack-level=<level>][&verify-ssl=true|false]]
    PUT /subscriptions/<sub-name>?topic=<topic-name>[&push-endpoint=<endpoint>[&amqp-exchange=<exchange>][&amqp-ack-level=none|broker][&verify-ssl=true|false][&kafka-ack-level=none|broker][&ca-location=<file path>]]

Request parameters:

- topic-name: name of topic
- push-endpoint: URI of endpoint to send push notification to

  - URI schema is: ``http[s]|amqp://[<user>:<password>@]<fqdn>[:<port>][/<amqp-vhost>]``
  - Same schema is used for HTTP and AMQP endpoints (except amqp-vhost which is specific to AMQP)
  - Default values for HTTP/S: no user/password, port 80/443
  - Default values for AMQP: user/password=guest/guest, port 5672, amqp-vhost is "/"
The endpoint URI may include parameters depending on the type of endpoint:

  - verify-ssl: can be used with https endpoints (ignored for other endpoints), indicate whether the server certificate is validated or not ("true" by default)
  - amqp-exchange: mandatory parameter for AMQP endpoint. The exchanges must exist and be able to route messages based on topics
  - amqp-ack-level: No end2end acking is required, as messages may persist in the broker before delivered into their final destination. 2 ack methods exist:
- HTTP endpoint

  - URI: ``http[s]://<fqdn>[:<port>]``
  - port defaults to: 80/443 for HTTP/S accordingly
  - verify-ssl: indicate whether the server certificate is validated by the client or not ("true" by default)

- AMQP0.9.1 endpoint

  - URI: ``amqp://[<user>:<password>@]<fqdn>[:<port>][/<vhost>]``
  - user/password defaults to: guest/guest
  - port defaults to: 5672
  - vhost defaults to: "/"
  - amqp-exchange: the exchanges must exist and be able to route messages based on topics (mandatory parameter for AMQP0.9.1)
  - amqp-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

- Kafka endpoint

  - URI: ``kafka://[<user>:<password>@]<fqdn>[:<port>]``
  - if ``ca-location`` is provided, secure connection will be used for connection with the broker
  - user/password may only be provided over HTTPS. Topic creation request will be rejected if not
  - user/password may only be provided together with ``ca-location``. Topic creation request will be rejected if not
  - port defaults to: 9092
  - kafka-ack-level: no end2end acking is required, as messages may persist in the broker before delivered into their final destination. Two ack methods exist:

    - "none": message is considered "delivered" if sent to broker
    - "broker": message is considered "delivered" if acked by broker (default)

  - "none": message is considered "delivered" if sent to broker
  - "broker": message is considered "delivered" if acked by broker

Get Subscription Information
````````````````````````````

@ -356,7 +408,8 @@ Response will have the following format (JSON):

    "bucket_name":"",
    "oid_prefix":"",
    "push_endpoint":"",
    "push_endpoint_args":""
    "push_endpoint_args":"",
    "push_endpoint_topic":""
    }
    "s3_id":""
    }
@ -364,6 +417,13 @@ Response will have the following format (JSON):

- user: name of the user that created the subscription
- name: name of the subscription
- topic: name of the topic the subscription is associated with
- dest.bucket_name: name of the bucket storing the events
- dest.oid_prefix: oid prefix for the events stored in the bucket
- dest.push_endpoint: in case of S3-compliant notifications, this value will be used as the push-endpoint URL
- if push-endpoint URL contains user/password information, request must be made over HTTPS. Topic get request will be rejected if not
- dest.push_endpoint_args: in case of S3-compliant notifications, this value will be used as the push-endpoint args
- dest.push_endpoint_topic: in case of S3-compliant notifications, this value will hold the topic name as sent to the endpoint (may be different than the internal topic name)
- s3_id: in case of S3-compliant notifications, this will hold the notification name that created the subscription

Delete Subscription
```````````````````

@ -438,10 +498,12 @@ the events will have an S3-compatible record format (JSON):

    "eTag":"",
    "versionId":"",
    "sequencer":"",
    "metadata":[]
    "metadata":[],
    "tags":[]
    }
    },
    "eventId":"",
    "opaqueData":"",
    }
    ]}

@ -462,7 +524,9 @@ the events will have an S3-compatible record format (JSON):

- s3.object.version: object version in case of versioned bucket
- s3.object.sequencer: monotonically increasing identifier of the change per object (hexadecimal format)
- s3.object.metadata: not supported (an extension to the S3 notification API)
- s3.object.tags: not supported (an extension to the S3 notification API)
- s3.eventId: unique ID of the event, that could be used for acking (an extension to the S3 notification API)
- s3.opaqueData: opaque data is set in the topic configuration and added to all notifications triggered by the topic (an extension to the S3 notification API)

In case the subscription was not created via an S3-compliant notification,
the events will have the following event format (JSON):

@ -49,6 +49,8 @@ Ceph's bucket notification API has the following extensions:

- Filtering based on metadata attributes attached to the object

- Filtering based on object tags

- Filtering overlapping is allowed, so that the same event could be sent as different notifications


@ -110,10 +112,14 @@ Note that most of the API is not applicable to Ceph, and only the following acti

- ``DeleteTopic``
- ``ListTopics``

We also extend it by:
We also have the following extensions to topic configuration:

- In ``GetTopic`` we allow fetching a specific topic, instead of all user topics
- In ``CreateTopic``

  - we allow setting endpoint attributes
  - we allow setting opaque data that will be sent to the endpoint in the notification

- ``GetTopic`` - allowing for fetching a specific topic, instead of all user topics
- In ``CreateTopic`` we allow setting endpoint attributes

.. _AWS Simple Notification Service API: https://docs.aws.amazon.com/sns/latest/api/API_Operations.html
.. _AWS S3 Bucket Notifications API: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html

@ -509,7 +509,13 @@ Parameters are XML encoded in the body of the request, in the following format:

            <Name></Name>
            <Value></Value>
        </FilterRule>
    </s3Metadata>
    </S3Metadata>
    <S3Tags>
        <FilterRule>
            <Name></Name>
            <Value></Value>
        </FilterRule>
    </S3Tags>
    </Filter>
    </TopicConfiguration>
    </NotificationConfiguration>

@ -528,15 +534,19 @@ Parameters are XML encoded in the body of the request, in the following format:

| ``Event``                     | String    | List of supported events see: `S3 Notification Compatibility`_. Multiple ``Event``  | No       |
|                               |           | entities can be used. If omitted, all events are handled                             |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``Filter``                    | Container | Holding ``S3Key`` and ``S3Metadata`` entities                                        | No       |
| ``Filter``                    | Container | Holding ``S3Key``, ``S3Metadata`` and ``S3Tags`` entities                            | No       |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Key``                     | Container | Holding a list of ``FilterRule`` entities, for filtering based on object key.        | No       |
|                               |           | At most, 3 entities may be in the list, with ``Name`` being ``prefix``, ``suffix``   |          |
|                               |           | or ``regex``. All filter rules in the list must match for the filter to match.       |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Metadata``                | Container | Holding a list of ``FilterRule`` entities, for filtering based on object metadata.   | No       |
|                               |           | All filter rules in the list must match the ones defined on the object. The object,  |          |
|                               |           | have other metadata entitied not listed in the filter.                               |          |
|                               |           | All filter rules in the list must match the metadata defined on the object. However, |          |
|                               |           | the object still matches if it has other metadata entries not listed in the filter.  |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Tags``                    | Container | Holding a list of ``FilterRule`` entities, for filtering based on object tags.       | No       |
|                               |           | All filter rules in the list must match the tags defined on the object. However,     |          |
|                               |           | the object still matches if it has other tags not listed in the filter.              |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Key.FilterRule``          | Container | Holding ``Name`` and ``Value`` entities. ``Name`` would be: ``prefix``, ``suffix``   | Yes      |
|                               |           | or ``regex``. The ``Value`` would hold the key prefix, key suffix or a regular       |          |
@ -544,7 +554,10 @@ Parameters are XML encoded in the body of the request, in the following format:

+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Metadata.FilterRule``     | Container | Holding ``Name`` and ``Value`` entities. ``Name`` would be the name of the metadata  | Yes      |
|                               |           | attribute (e.g. ``x-amz-meta-xxx``). The ``Value`` would be the expected value for   |          |
|                               |           | this attribute                                                                       |          |
|                               |           | this attribute.                                                                      |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+
| ``S3Tags.FilterRule``         | Container | Holding ``Name`` and ``Value`` entities. ``Name`` would be the tag key,              | Yes      |
|                               |           | and ``Value`` would be the tag value.                                                |          |
+-------------------------------+-----------+--------------------------------------------------------------------------------------+----------+


@ -647,7 +660,13 @@ Response is XML encoded in the body of the request, in the following format:

            <Name></Name>
            <Value></Value>
        </FilterRule>
    </s3Metadata>
    </S3Metadata>
    <S3Tags>
        <FilterRule>
            <Name></Name>
            <Value></Value>
        </FilterRule>
    </S3Tags>
    </Filter>
    </TopicConfiguration>
    </NotificationConfiguration>
@ -679,4 +698,4 @@ HTTP Response

| ``404``       | NoSuchKey             | The notification does not exist (if provided)            |
+---------------+-----------------------+----------------------------------------------------------+

.. _S3 Notification Compatibility: ../s3-notification-compatibility
.. _S3 Notification Compatibility: ../../s3-notification-compatibility

@ -360,7 +360,10 @@ Instructions

   and verify that each monitor has both a ``v2:`` and ``v1:`` address
   listed.

#. For each host that has been upgrade, you should update your
   Running nautilus OSDs will not bind to their v2 address automatically.
   They must be restarted for that to happen.

#. For each host that has been upgraded, you should update your
   ``ceph.conf`` file so that it references both the v2 and v1
   addresses. Things will still work if only the v1 IP and port are
   listed, but each CLI instantiation or daemon will need to reconnect
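
For illustration of the ``ceph.conf`` update described in this step, a
dual-address monitor entry might look like this (the address is hypothetical)::

    mon_host = [v2:10.0.0.1:3300,v1:10.0.0.1:6789]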

@ -319,11 +319,11 @@ else

        $SUDO apt-get install -y libxmlsec1 libxmlsec1-nss libxmlsec1-openssl libxmlsec1-dev
        ;;
    centos|fedora|rhel|ol|virtuozzo)
        yumdnf="yum"
        builddepcmd="yum-builddep -y --setopt=*.skip_if_unavailable=true"
        if test "$(echo "$VERSION_ID >= 22" | bc)" -ne 0; then
            yumdnf="dnf"
            builddepcmd="dnf -y builddep --allowerasing"
        yumdnf="dnf"
        builddepcmd="dnf -y builddep --allowerasing"
        if [[ $ID =~ centos|rhel ]] && version_lt $VERSION_ID 8; then
            yumdnf="yum"
            builddepcmd="yum-builddep -y --setopt=*.skip_if_unavailable=true"
        fi
        echo "Using $yumdnf to install dependencies"
        if [ "$ID" = "centos" -a "$ARCH" = "aarch64" ]; then
@ -333,29 +333,27 @@ else

        fi
        case "$ID" in
            fedora)
                if test $yumdnf = yum; then
                    $SUDO $yumdnf install -y yum-utils
                fi
                $SUDO $yumdnf install -y $yumdnf-utils
                ;;
            centos|rhel|ol|virtuozzo)
                MAJOR_VERSION="$(echo $VERSION_ID | cut -d. -f1)"
                $SUDO yum install -y yum-utils
                $SUDO $yumdnf install -y $yumdnf-utils
                if test $ID = rhel ; then
                    $SUDO yum-config-manager --enable rhel-$MAJOR_VERSION-server-optional-rpms
                fi
                rpm --quiet --query epel-release || \
                    $SUDO yum -y install --nogpgcheck https://dl.fedoraproject.org/pub/epel/epel-release-latest-$MAJOR_VERSION.noarch.rpm
                    $SUDO $yumdnf -y install --nogpgcheck https://dl.fedoraproject.org/pub/epel/epel-release-latest-$MAJOR_VERSION.noarch.rpm
                $SUDO rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-$MAJOR_VERSION
                $SUDO rm -f /etc/yum.repos.d/dl.fedoraproject.org*
                if test $ID = centos -a $MAJOR_VERSION = 7 ; then
                    $SUDO $yumdnf install -y python36-devel
                    case "$ARCH" in
                        x86_64)
                            $SUDO yum -y install centos-release-scl
                            $SUDO $yumdnf -y install centos-release-scl
                            dts_ver=8
                            ;;
                        aarch64)
                            $SUDO yum -y install centos-release-scl-rh
                            $SUDO $yumdnf -y install centos-release-scl-rh
                            $SUDO yum-config-manager --disable centos-sclo-rh
                            $SUDO yum-config-manager --enable centos-sclo-rh-testing
                            dts_ver=8
@ -366,6 +364,10 @@ else

                    --enable rhel-server-rhscl-7-rpms \
                    --enable rhel-7-server-devtools-rpms
                    dts_ver=8
                elif test $ID = centos -a $MAJOR_VERSION = 8 ; then
                    $SUDO dnf config-manager --set-enabled PowerTools
                elif test $ID = rhel -a $MAJOR_VERSION = 8 ; then
                    $SUDO subscription-manager repos --enable "codeready-builder-for-rhel-8-*-rpms"
                fi
                ;;
        esac

@ -260,6 +260,7 @@
|
||||
"#d44a3a"
|
||||
],
|
||||
"datasource": "$datasource",
|
||||
"decimals": 2,
|
||||
"format": "percentunit",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
|
@ -798,7 +798,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "(irate(node_disk_writes_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "(irate(node_disk_writes_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}}({{ceph_daemon}}) writes",
|
||||
@ -807,7 +807,7 @@
|
||||
"textEditor": true
|
||||
},
|
||||
{
|
||||
"expr": "(irate(node_disk_reads_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "(irate(node_disk_reads_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@ -899,14 +899,14 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "(irate(node_disk_bytes_written{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}}({{ceph_daemon}}) write",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "(irate(node_disk_bytes_read{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}}({{ceph_daemon}}) read",
|
||||
@ -992,7 +992,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "max by(instance,device) ((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001) ) * on(instance,device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance,device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@ -1083,7 +1083,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts).*\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts).*\"}[5m]) * 100)* on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
|
||||
"expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts).*\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts).*\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@ -1168,7 +1168,7 @@
|
||||
"options": [],
|
||||
"query": "label_values(node_scrape_collector_success, instance) ",
|
||||
"refresh": 1,
|
||||
"regex": "([^:]*).*",
|
||||
"regex": "([^.:]*).*",
|
||||
"skipUrlSync": false,
|
||||
"sort": 3,
|
||||
"tagValuesQuery": "",
|
||||
|
@ -133,6 +133,7 @@
|
||||
"datasource": "$datasource",
|
||||
"decimals": 0,
|
||||
"description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster",
|
||||
"decimals": 2,
|
||||
"format": "percentunit",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
@ -216,6 +217,7 @@
|
||||
"datasource": "$datasource",
|
||||
"decimals": 0,
|
||||
"description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)",
|
||||
"decimals": 2,
|
||||
"format": "percentunit",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
@ -267,7 +269,7 @@
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"[[osd_hosts]]|[[rgw_hosts]]|[[mon_hosts]]|[[mds_hosts]].*\"} ))",
|
||||
"expr": "avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"} ))",
|
||||
"format": "time_series",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@ -431,7 +433,7 @@
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr" : "avg (\n ((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100)\n ) *\n on(instance, device) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")\n)",
|
||||
"expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)",
|
||||
"format": "time_series",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@ -670,7 +672,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Network Load - Top 10",
|
||||
"title": "Network Load - Top 10 Hosts",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 2,
|
||||
@ -740,7 +742,7 @@
|
||||
"multi": false,
|
||||
"name": "osd_hosts",
|
||||
"options": [],
|
||||
"query": "label_values(ceph_disk_occupation, instance)",
|
||||
"query": "label_values(ceph_disk_occupation, exported_instance)",
|
||||
"refresh": 1,
|
||||
"regex": "([^.]*).*",
|
||||
"skipUrlSync": false,
|
||||
|
@@ -390,14 +390,14 @@
 "steppedLine": false,
 "targets": [
 {
-"expr": "(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\"))",
+"expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{instance}}/{{device}} Reads",
 "refId": "A"
 },
 {
-"expr": "(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\"))",
+"expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{instance}}/{{device}} Writes",
@@ -486,14 +486,14 @@
 "steppedLine": false,
 "targets": [
 {
-"expr": "irate(node_disk_writes_completed_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
+"expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{device}} on {{instance}} Writes",
 "refId": "A"
 },
 {
-"expr": "irate(node_disk_reads_completed_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
+"expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{device}} on {{instance}} Reads",
@@ -582,14 +582,14 @@
 "steppedLine": false,
 "targets": [
 {
-"expr": "irate(node_disk_read_bytes_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
+"expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{instance}} {{device}} Reads",
 "refId": "A"
 },
 {
-"expr": "irate(node_disk_written_bytes_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
+"expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{instance}} {{device}} Writes",
@@ -673,7 +673,7 @@
 "steppedLine": false,
 "targets": [
 {
-"expr": "irate(node_disk_io_time_seconds_total[1m]) and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"(.*)\")",
+"expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{device}} on {{instance}}",

@@ -50,6 +50,7 @@
 "#d44a3a"
 ],
 "datasource": "$datasource",
+"decimals": 2,
 "format": "percentunit",
 "gauge": {
 "maxValue": 1,
@@ -101,7 +102,7 @@
 "tableColumn": "",
 "targets": [
 {
-"expr": "(ceph_pool_stored / ceph_pool_max_avail) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}",
+"expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}",
 "format": "time_series",
 "intervalFactor": 1,
 "refId": "A"
@@ -182,7 +183,7 @@
 "tableColumn": "",
 "targets": [
 {
-"expr": "((ceph_pool_max_avail - ceph_pool_stored) / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0",
+"expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0",
 "format": "time_series",
 "intervalFactor": 1,
 "refId": "A"
@@ -257,7 +258,7 @@
 "thresholds": [],
 "timeFrom": null,
 "timeShift": null,
-"title": "$pool_name Object In-/Egress",
+"title": "$pool_name Object Ingress/Egress",
 "tooltip": {
 "shared": true,
 "sort": 0,

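The gauge fix above follows from what the two metrics mean: ceph_pool_max_avail reports space still available, not pool capacity, so stored / max_avail can exceed 100%. Dividing by stored + max_avail yields a true 0 to 1 fraction, which is what a percentunit gauge with maxValue 1 expects; the time-to-full panel likewise now divides the remaining space directly by the fill rate. A sketch with invented numbers:

    # Invented sample values, in bytes, for one pool.
    stored = 600 * 2**30      # ceph_pool_stored: bytes already written
    max_avail = 400 * 2**30   # ceph_pool_max_avail: bytes still available

    old_ratio = stored / max_avail             # 1.50 -> gauge pinned past 100%
    new_ratio = stored / (stored + max_avail)  # 0.60 -> true fraction used

    print(f"old: {old_ratio:.0%}  new: {new_ratio:.0%}")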
@@ -83,14 +83,14 @@
 "steppedLine": false,
 "targets": [
 {
-"expr": "avg(rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s]))",
+"expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "GET AVG",
 "refId": "A"
 },
 {
-"expr": "avg(rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s]))",
+"expr": "rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "PUT AVG",

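Dropping the avg() wrapper leaves the standard counter-pair idiom: rate(_sum) / rate(_count) is the mean GET (or PUT) latency over the window, now kept per RGW daemon instead of flattened across them. The arithmetic, with invented scrape values:

    # Two scrapes of the hypothetical counters, 30 s apart.
    lat_sum_t0, lat_count_t0 = 125.0, 1000   # ceph_rgw_get_initial_lat_sum / _count
    lat_sum_t1, lat_count_t1 = 128.5, 1070

    # rate(sum)/rate(count): the scrape interval cancels, leaving
    # mean seconds per GET over the window.
    avg_latency = (lat_sum_t1 - lat_sum_t0) / (lat_count_t1 - lat_count_t0)
    print(f"{avg_latency * 1000:.1f} ms per GET")   # 50.0 ms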
ceph/monitoring/grafana/dashboards/rbd-details.json (new file, 409 lines)
@@ -0,0 +1,409 @@
{
  "__inputs": [],
  "__requires": [
    {
      "type": "grafana",
      "id": "grafana",
      "name": "Grafana",
      "version": "5.3.3"
    },
    {
      "type": "panel",
      "id": "graph",
      "name": "Graph",
      "version": "5.0.0"
    }
  ],
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": "-- Grafana --",
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "description": "Detailed Performance of RBD Images (IOPS/Latency)",
  "editable": false,
  "gnetId": null,
  "graphTooltip": 0,
  "id": null,
  "iteration": 1584428820779,
  "links": [],
  "panels": [
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": "$Datasource",
      "fill": 1,
      "gridPos": {
        "h": 9,
        "w": 8,
        "x": 0,
        "y": 0
      },
      "id": 6,
      "legend": {
        "avg": false,
        "current": false,
        "hideEmpty": false,
        "hideZero": false,
        "max": false,
        "min": false,
        "show": true,
        "total": false,
        "values": false
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null as zero",
      "percentage": false,
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Write {{instance}}",
          "refId": "A"
        },
        {
          "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Read {{instance}}",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeFrom": null,
      "timeShift": null,
      "title": "IOPS Count",
      "tooltip": {
        "shared": true,
        "sort": 0,
        "value_type": "individual"
      },
      "type": "graph",
      "xaxis": {
        "buckets": null,
        "mode": "time",
        "name": null,
        "show": true,
        "values": []
      },
      "yaxes": [
        {
          "format": "iops",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": "0",
          "show": true
        },
        {
          "format": "iops",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        }
      ],
      "yaxis": {
        "align": true,
        "alignLevel": null
      }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": "$Datasource",
      "fill": 1,
      "gridPos": {
        "h": 9,
        "w": 8,
        "x": 8,
        "y": 0
      },
      "id": 4,
      "legend": {
        "avg": false,
        "current": false,
        "max": false,
        "min": false,
        "show": true,
        "total": false,
        "values": false
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null as zero",
      "percentage": false,
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Write {{instance}}",
          "refId": "A"
        },
        {
          "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Read {{instance}}",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeFrom": null,
      "timeShift": null,
      "title": "IO Bytes per Second",
      "tooltip": {
        "shared": true,
        "sort": 0,
        "value_type": "individual"
      },
      "type": "graph",
      "xaxis": {
        "buckets": null,
        "mode": "time",
        "name": null,
        "show": true,
        "values": []
      },
      "yaxes": [
        {
          "format": "Bps",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": "0",
          "show": true
        },
        {
          "format": "Bps",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        }
      ],
      "yaxis": {
        "align": true,
        "alignLevel": null
      }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": "$Datasource",
      "fill": 1,
      "gridPos": {
        "h": 9,
        "w": 8,
        "x": 16,
        "y": 0
      },
      "id": 2,
      "legend": {
        "avg": false,
        "current": false,
        "max": false,
        "min": false,
        "show": true,
        "total": false,
        "values": false
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null as zero",
      "percentage": false,
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Write Latency Sum",
          "refId": "A"
        },
        {
          "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Read Latency Sum",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeFrom": null,
      "timeShift": null,
      "title": "Average Latency",
      "tooltip": {
        "shared": true,
        "sort": 0,
        "value_type": "individual"
      },
      "type": "graph",
      "xaxis": {
        "buckets": null,
        "mode": "time",
        "name": null,
        "show": true,
        "values": []
      },
      "yaxes": [
        {
          "format": "ns",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": "0",
          "show": true
        },
        {
          "format": "ns",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        }
      ],
      "yaxis": {
        "align": true,
        "alignLevel": null
      }
    }
  ],
  "refresh": false,
  "schemaVersion": 16,
  "style": "dark",
  "tags": [],
  "templating": {
    "list": [
      {
        "current": {},
        "hide": 0,
        "label": null,
        "name": "Datasource",
        "options": [],
        "query": "prometheus",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "type": "datasource"
      },
      {
        "allValue": null,
        "current": {},
        "datasource": "$Datasource",
        "hide": 0,
        "includeAll": false,
        "label": null,
        "multi": false,
        "name": "Pool",
        "options": [],
        "query": "label_values(pool)",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "sort": 0,
        "tagValuesQuery": "",
        "tags": [],
        "tagsQuery": "",
        "type": "query",
        "useTags": false
      },
      {
        "allValue": null,
        "current": {},
        "datasource": "$Datasource",
        "hide": 0,
        "includeAll": false,
        "label": null,
        "multi": false,
        "name": "Image",
        "options": [],
        "query": "label_values(image)",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "sort": 0,
        "tagValuesQuery": "",
        "tags": [],
        "tagsQuery": "",
        "type": "query",
        "useTags": false
      }
    ]
  },
  "time": {
    "from": "now-1h",
    "to": "now"
  },
  "timepicker": {
    "refresh_intervals": [
      "5s",
      "10s",
      "30s",
      "1m",
      "5m",
      "15m",
      "30m",
      "1h",
      "2h",
      "1d"
    ],
    "time_options": [
      "5m",
      "15m",
      "1h",
      "6h",
      "12h",
      "24h",
      "2d",
      "7d",
      "30d"
    ]
  },
  "timezone": "",
  "title": "RBD Details",
  "uid": "YhCYGcuZz",
  "version": 7
}

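The three panels read the per-image ceph_rbd_* series that the mgr prometheus module exports once per-pool RBD stats collection is enabled. A hedged sketch of pulling one of the dashboard's queries straight from the Prometheus HTTP API (the Prometheus address, pool, and image names here are invented):

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    PROM = "http://prometheus.example:9090"   # hypothetical Prometheus endpoint
    query = 'irate(ceph_rbd_write_ops{pool="rbd", image="vm-100-disk-0"}[30s])'

    with urlopen(f"{PROM}/api/v1/query?{urlencode({'query': query})}") as resp:
        payload = json.load(resp)

    # One result per publishing daemon; the value is [timestamp, "iops"].
    for series in payload["data"]["result"]:
        print(series["metric"], series["value"])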
@@ -44,13 +44,13 @@ groups:
   - name: osd
     rules:
       - alert: 10% OSDs down
-        expr: (sum(ceph_osd_up) / count(ceph_osd_up)) * 100 <= 90
+        expr: count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10
         labels:
           severity: critical
           type: ceph_default
         annotations:
           description: |
-            {{ $value | humanize}}% or {{with query "sum(ceph_osd_up)" }}{{ . | first | value }}{{ end }} of {{ with query "count(ceph_osd_up)"}}{{. | first | value }}{{ end }} OSDs are down (>=10%).
+            {{ $value | humanize }}% or {{ with query "count(ceph_osd_up == 0)" }}{{ . | first | value }}{{ end }} of {{ with query "count(ceph_osd_up)" }}{{ . | first | value }}{{ end }} OSDs are down (≥ 10%).

             The following OSDs are down:
             {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }}
@@ -156,6 +156,7 @@ groups:
     rules:
       - alert: root volume full
         expr: node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 5
+        for: 5m
         labels:
           severity: critical
           type: ceph_default
@@ -186,15 +187,10 @@ groups:
             Node {{ $labels.instance }} experiences packet errors > 1
             packet/s on interface {{ $labels.device }}.

       # predict fs fill-up times
-      - alert: storage filling
+      - alert: storage filling up
         expr: |
-          (
-            (
-              node_filesystem_free_bytes / deriv(node_filesystem_free_bytes[2d])
-                * on(instance) group_left(nodename) node_uname_info
-            ) <= 5
-          ) > 0
+          predict_linear(node_filesystem_free_bytes[2d], 3600 * 24 * 5) *
+            on(instance) group_left(nodename) node_uname_info < 0
         labels:
           severity: warning
           type: ceph_default
@@ -208,7 +204,7 @@ groups:
     rules:
       - alert: pool full
         expr: |
-          ceph_pool_stored / ceph_pool_max_avail
+          ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)
             * on(pool_id) group_right ceph_pool_metadata * 100 > 90
         labels:
           severity: critical
@@ -219,10 +215,9 @@ groups:
       - alert: pool filling up
         expr: |
           (
-            (
-              (ceph_pool_max_avail - ceph_pool_stored) / deriv(ceph_pool_max_avail[2d])
-            ) * on(pool_id) group_right ceph_pool_metadata <= 5
-          ) > 0
+            predict_linear(ceph_pool_stored[2d], 3600 * 24 * 5) >=
+              ceph_pool_max_avail
+          ) * on(pool_id) group_left(name) ceph_pool_metadata
         labels:
           severity: warning
           type: ceph_default

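The rewritten rules lean on predict_linear(), which extrapolates a linear fit over the sampled range (here 2 days) out to the given horizon (3600 * 24 * 5, i.e. five days) and fires if the result crosses zero, or, for pools, overtakes ceph_pool_max_avail. The arithmetic, sketched with invented numbers:

    # predict_linear(v[2d], t) is roughly: value_now + slope_over_2d * t
    free_now = 200 * 2**30            # bytes free at evaluation time
    slope = -(2**30) / 3600           # losing about 1 GiB per hour

    horizon = 3600 * 24 * 5           # five days, as in the rules
    predicted = free_now + slope * horizon

    print("alert fires" if predicted < 0 else "ok")  # fires: ~120 GiB short of five days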
ceph/qa/.teuthology_branch (new file, 1 line)
@@ -0,0 +1 @@
master

@@ -9,3 +9,4 @@ overrides:
       - \(MDS_DAMAGE\)
       - \(MDS_ALL_DOWN\)
       - \(MDS_UP_LESS_THAN_MAX\)
+      - \(TOO_FEW_PGS\)

@@ -2,6 +2,7 @@ overrides:
   ceph:
     log-whitelist:
       - SLOW_OPS
+      - slow request
     conf:
       osd:
         filestore flush min: 0

ceph/qa/distros/all/centos_7.8.yaml (new file, 2 lines)
@@ -0,0 +1,2 @@
os_type: centos
os_version: "7.8"

ceph/qa/distros/all/rhel_7.8.yaml (new file, 2 lines)
@@ -0,0 +1,2 @@
os_type: rhel
os_version: "7.8"

@@ -1 +1 @@
-../all/centos_7.6.yaml
+../all/centos_7.8.yaml

@@ -1 +1 @@
-../all/rhel_7.6.yaml
+../all/rhel_7.8.yaml

@@ -1 +1 @@
-../all/centos_7.5.yaml
+../all/centos_7.8.yaml

@@ -1 +1 @@
-../all/rhel_7.5.yaml
+../all/rhel_7.8.yaml

@@ -1 +1 @@
-../all/centos_7.6.yaml
+../all/centos_7.8.yaml

@@ -1 +1 @@
-../all/rhel_7.6.yaml
+../all/rhel_7.8.yaml

@@ -13,6 +13,7 @@ overrides:
         debug rocksdb: 10
         bluestore compression mode: aggressive
         bluestore fsck on mount: true
+        bluestore compression algorithm: lz4
         # lower the full ratios since we can fill up a 100gb osd so quickly
         mon osd full ratio: .9
         mon osd backfillfull_ratio: .85

ceph/qa/objectstore/bluestore-comp-snappy.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        bluestore compression algorithm: snappy
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true

ceph/qa/objectstore/bluestore-comp-zlib.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        bluestore compression algorithm: zlib
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true

ceph/qa/objectstore/bluestore-comp-zstd.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        bluestore compression algorithm: zstd
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true

@@ -4,3 +4,4 @@ overrides:
     global:
       osd_min_pg_log_entries: 1
       osd_max_pg_log_entries: 2
+      osd_pg_log_trim_min: 0

@@ -93,13 +93,9 @@ if ls $(dirname $(sysctl -n $KERNCORE)) | grep -q '^core\|core$' ; then
 fi

 ulimit -c unlimited
-for f in $(cd $location ; find . -perm $exec_mode -type f)
+for f in $(cd $location ; find . -mindepth 2 -perm $exec_mode -type f)
 do
     f=$(echo $f | sed 's/\.\///')
-    # This is tested with misc/test-ceph-helpers.sh
-    if [[ "$f" = "ceph-helpers.sh" ]]; then
-        continue
-    fi
     if [[ "$all" = "false" ]]; then
         found=false
         for c in "${!select[@]}"

@@ -173,7 +173,7 @@ function rados_put_get_data() {
         ceph osd out ${last_osd} || return 1
         ! get_osds $poolname $objname | grep '\<'${last_osd}'\>' || return 1
         ceph osd in ${last_osd} || return 1
-        run_osd $dir ${last_osd} || return 1
+        activate_osd $dir ${last_osd} || return 1
         wait_for_clean || return 1
     fi

@@ -373,7 +373,7 @@ function TEST_ec_object_attr_read_error() {
     inject_eio ec mdata $poolname $objname $dir 1 || return 1

     # Restart OSD
-    run_osd $dir ${primary_osd} || return 1
+    activate_osd $dir ${primary_osd} || return 1

     # Cluster should recover this object
     wait_for_clean || return 1
@@ -541,7 +541,7 @@ function TEST_ec_backfill_unfound() {
     inject_eio ec data $poolname $testobj $dir 0 || return 1
     inject_eio ec data $poolname $testobj $dir 1 || return 1

-    run_osd $dir ${last_osd} || return 1
+    activate_osd $dir ${last_osd} || return 1
     ceph osd in ${last_osd} || return 1

     sleep 15
@@ -621,7 +621,7 @@ function TEST_ec_recovery_unfound() {
     inject_eio ec data $poolname $testobj $dir 0 || return 1
     inject_eio ec data $poolname $testobj $dir 1 || return 1

-    run_osd $dir ${last_osd} || return 1
+    activate_osd $dir ${last_osd} || return 1
     ceph osd in ${last_osd} || return 1

     sleep 15

ceph/qa/standalone/mon/mon-last-epoch-clean.sh (new executable file, 307 lines)
@@ -0,0 +1,307 @@
#!/usr/bin/env bash

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh


function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7302" # git grep '\<7302\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}


function check_lec_equals_pools() {

    local pool_id=$1

    report=$(ceph report)
    lec=$(echo $report | \
        jq '.osdmap_clean_epochs.min_last_epoch_clean')

    if [[ -z "$pool_id" ]]; then
        pools=($(echo $report | \
            jq \
              ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
              " select(.floor == $lec) | .poolid"))

        [[ ${#pools[*]} -eq 2 ]] || ( echo $report ; return 1 )
    else
        floor=($(echo $report | \
            jq \
              ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
              " select(.poolid == $pool_id) | .floor"))

        [[ $lec -eq $floor ]] || ( echo $report ; return 1 )
    fi
    return 0
}

function check_lec_lower_than_pool() {

    local pool_id=$1
    [[ -z "$pool_id" ]] && ( echo "expected pool_id as parameter" ; exit 1 )

    report=$(ceph report)
    lec=$(echo $report | \
        jq '.osdmap_clean_epochs.min_last_epoch_clean')

    floor=($(echo $report | \
        jq \
          ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
          " select(.poolid == $pool_id) | .floor"))

    [[ $lec -lt $floor ]] || ( echo $report ; return 1 )
    return 0
}

function check_floor_pool_greater_than_pool() {

    local pool_a=$1
    local pool_b=$2
    [[ -z "$pool_a" ]] && ( echo "expected id as first parameter" ; exit 1 )
    [[ -z "$pool_b" ]] && ( echo "expected id as second parameter" ; exit 1 )

    report=$(ceph report)

    floor_a=($(echo $report | \
        jq \
          ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
          " select(.poolid == $pool_a) | .floor"))

    floor_b=($(echo $report | \
        jq \
          ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
          " select(.poolid == $pool_b) | .floor"))

    [[ $floor_a -gt $floor_b ]] || ( echo $report ; return 1 )
    return 0
}

function check_lec_honours_osd() {

    local osd=$1

    report=$(ceph report)
    lec=$(echo $report | \
        jq '.osdmap_clean_epochs.min_last_epoch_clean')

    if [[ -z "$osd" ]]; then
        osds=($(echo $report | \
            jq \
              ".osdmap_clean_epochs.osd_epochs[] |" \
              " select(.epoch >= $lec) | .id"))

        [[ ${#osds[*]} -eq 3 ]] || ( echo $report ; return 1 )
    else
        epoch=($(echo $report | \
            jq \
              ".osdmap_clean_epochs.osd_epochs[] |" \
              " select(.id == $osd) | .epoch"))
        [[ ${#epoch[*]} -eq 1 ]] || ( echo $report ; return 1 )
        [[ ${epoch[0]} -ge $lec ]] || ( echo $report ; return 1 )
    fi

    return 0
}

function validate_fc() {
    report=$(ceph report)
    lec=$(echo $report | \
        jq '.osdmap_clean_epochs.min_last_epoch_clean')
    osdm_fc=$(echo $report | \
        jq '.osdmap_first_committed')

    [[ $lec -eq $osdm_fc ]] || ( echo $report ; return 1 )
    return 0
}

function get_fc_lc_diff() {
    report=$(ceph report)
    osdm_fc=$(echo $report | \
        jq '.osdmap_first_committed')
    osdm_lc=$(echo $report | \
        jq '.osdmap_last_committed')

    echo $((osdm_lc - osdm_fc))
}

function get_pool_id() {

    local pn=$1
    [[ -z "$pn" ]] && ( echo "expected pool name as argument" ; exit 1 )

    report=$(ceph report)
    pool_id=$(echo $report | \
        jq ".osdmap.pools[] | select(.pool_name == \"$pn\") | .pool")

    [[ $pool_id -ge 0 ]] || \
        ( echo "unexpected pool id for pool \'$pn\': $pool_id" ; return -1 )

    echo $pool_id
    return 0
}

function wait_for_total_num_maps() {
    # rip wait_for_health, because it's easier than deduplicating the code
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0
    local -i v_diff=$1

    while [[ $(get_fc_lc_diff) -gt $v_diff ]]; do
        if (( $loop >= ${#delays[*]} )) ; then
            echo "maps were not trimmed"
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

function TEST_mon_last_clean_epoch() {

    local dir=$1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    osd_pid=$(cat $dir/osd.2.pid)

    sleep 5

    ceph tell osd.* injectargs '--osd-beacon-report-interval 10' || exit 1
    ceph tell mon.* injectargs \
        '--mon-min-osdmap-epochs 2 --paxos-service-trim-min 1' || exit 1

    create_pool foo 32
    create_pool bar 32

    foo_id=$(get_pool_id "foo")
    bar_id=$(get_pool_id "bar")

    [[ $foo_id -lt 0 ]] && ( echo "couldn't find pool 'foo' id" ; exit 1 )
    [[ $bar_id -lt 0 ]] && ( echo "couldn't find pool 'bar' id" ; exit 1 )

    # no real clue why we are getting these warnings, but let's make them go
    # away so we can be happy.

    ceph osd set-full-ratio 0.97
    ceph osd set-backfillfull-ratio 0.97

    wait_for_health_ok || exit 1

    pre_map_diff=$(get_fc_lc_diff)
    wait_for_total_num_maps 2
    post_map_diff=$(get_fc_lc_diff)

    [[ $post_map_diff -le $pre_map_diff ]] || exit 1

    pre_map_diff=$post_map_diff

    ceph osd pool set foo size 3
    ceph osd pool set bar size 3

    wait_for_health_ok || exit 1

    check_lec_equals_pools || exit 1
    check_lec_honours_osd || exit 1
    validate_fc || exit 1

    # down osd.2; expected result (because all pools' size equals 3):
    # - number of committed maps increase over 2
    # - lec equals fc
    # - lec equals osd.2's epoch
    # - all pools have floor equal to lec

    while kill $osd_pid ; do sleep 1 ; done
    ceph osd down 2
    sleep 5 # seriously, just to make sure things settle; we may not need this.

    # generate some maps
    for ((i=0; i <= 10; ++i)); do
        ceph osd set noup
        sleep 1
        ceph osd unset noup
        sleep 1
    done

    post_map_diff=$(get_fc_lc_diff)
    [[ $post_map_diff -gt 2 ]] || exit 1

    validate_fc || exit 1
    check_lec_equals_pools || exit 1
    check_lec_honours_osd 2 || exit 1

    # adjust pool 'bar' size to 2; expect:
    # - number of committed maps still over 2
    # - lec equals fc
    # - lec equals pool 'foo' floor
    # - pool 'bar' floor greater than pool 'foo'

    ceph osd pool set bar size 2

    diff_ver=$(get_fc_lc_diff)
    [[ $diff_ver -gt 2 ]] || exit 1

    validate_fc || exit 1

    check_lec_equals_pools $foo_id || exit 1
    check_lec_lower_than_pool $bar_id || exit 1

    check_floor_pool_greater_than_pool $bar_id $foo_id || exit 1

    # set pool 'foo' size to 2; expect:
    # - health_ok
    # - lec equals pools
    # - number of committed maps decreases
    # - lec equals fc

    pre_map_diff=$(get_fc_lc_diff)

    ceph osd pool set foo size 2 || exit 1
    wait_for_clean || exit 1

    check_lec_equals_pools || exit 1
    validate_fc || exit 1

    if ! wait_for_total_num_maps 2 ; then
        post_map_diff=$(get_fc_lc_diff)
        # number of maps is decreasing though, right?
        [[ $post_map_diff -lt $pre_map_diff ]] || exit 1
    fi

    # bring back osd.2; expect:
    # - health_ok
    # - lec equals fc
    # - number of committed maps equals 2
    # - all pools have floor equal to lec

    pre_map_diff=$(get_fc_lc_diff)

    activate_osd $dir 2 || exit 1
    wait_for_health_ok || exit 1
    validate_fc || exit 1
    check_lec_equals_pools || exit 1

    if ! wait_for_total_num_maps 2 ; then
        post_map_diff=$(get_fc_lc_diff)
        # number of maps is decreasing though, right?
        [[ $post_map_diff -lt $pre_map_diff ]] || exit 1
    fi

    return 0
}

main mon-last-clean-epoch "$@"

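Every helper in the new test shells out to jq against `ceph report`; when reproducing a failure by hand it can be easier to pull the same fields in one pass. A sketch assuming a running test cluster with the ceph CLI on PATH:

    import json
    import subprocess

    # One `ceph report` call instead of one jq pipeline per field.
    report = json.loads(subprocess.check_output(["ceph", "report"]))

    clean = report["osdmap_clean_epochs"]
    lec = clean["min_last_epoch_clean"]

    # Per-pool floors, as read by check_lec_equals_pools()
    floors = {p["poolid"]: p["floor"] for p in clean["last_epoch_clean"]["per_pool"]}

    # Untrimmed map window, as computed by get_fc_lc_diff()
    map_diff = report["osdmap_last_committed"] - report["osdmap_first_committed"]

    print(lec, floors, map_diff)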
@@ -37,7 +37,7 @@ function TEST_ec_error_rollforward() {

     rados -p ec put foo /etc/passwd

-    kill -STOP `cat $dir/osd.2.pid`
+    kill -STOP $(cat $dir/osd.2.pid)

     rados -p ec rm foo &
     pids="$!"
@@ -49,14 +49,16 @@ function TEST_ec_error_rollforward() {
     rados -p ec rm c &
     pids+=" $!"
     sleep 1
-    kill -9 `cat $dir/osd.?.pid`
+    # Use SIGKILL so stopped osd.2 will terminate
+    # and kill_daemons waits for daemons to die
+    kill_daemons $dir KILL osd
     kill $pids
     wait

-    run_osd $dir 0 || return 1
-    run_osd $dir 1 || return 1
-    run_osd $dir 2 || return 1
-    run_osd $dir 3 || return 1
+    activate_osd $dir 0 || return 1
+    activate_osd $dir 1 || return 1
+    activate_osd $dir 2 || return 1
+    activate_osd $dir 3 || return 1

     wait_for_clean || return 1
 }

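The comment in that hunk is the whole story: osd.2 was frozen with SIGSTOP, and a stopped process does not act on SIGTERM until it is continued, while SIGKILL takes effect regardless. A minimal demonstration of the same mechanics (plain POSIX, no Ceph involved):

    import os
    import signal
    import time

    pid = os.fork()
    if pid == 0:
        while True:           # child: idle like a daemon
            time.sleep(1)

    os.kill(pid, signal.SIGSTOP)   # freeze it, as the test does to osd.2
    os.kill(pid, signal.SIGTERM)   # queued, but not acted on while stopped
    time.sleep(1)
    print(os.waitpid(pid, os.WNOHANG))  # (0, 0): the child is still alive

    os.kill(pid, signal.SIGKILL)   # SIGKILL terminates even a stopped process
    print(os.waitpid(pid, 0))      # reaped: terminated by signal 9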
@@ -147,7 +147,7 @@ function TEST_backfill_test_simple() {
     do
         ceph osd pool set "${poolprefix}$p" size 2
     done
-    sleep 5
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1
@@ -226,7 +226,7 @@ function TEST_backfill_test_multi() {
     do
         ceph osd pool set "${poolprefix}$p" size 2
     done
-    sleep 5
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1
@@ -378,7 +378,7 @@ function TEST_backfill_test_sametarget() {

     ceph osd pool set $pool1 size 2
     ceph osd pool set $pool2 size 2
-    sleep 5
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1
@@ -470,10 +470,8 @@ function TEST_backfill_multi_partial() {
         osd="0"
     fi

-    sleep 5
-    kill $(cat $dir/osd.$fillosd.pid)
+    kill_daemon $dir/osd.$fillosd.pid TERM
     ceph osd out osd.$fillosd
-    sleep 2

     _objectstore_tool_nodown $dir $fillosd --op export-remove --pgid 1.0 --file $dir/fillexport.out || return 1
     activate_osd $dir $fillosd || return 1
@@ -489,8 +487,7 @@ function TEST_backfill_multi_partial() {
     ceph pg dump pgs
     # The $osd OSD is started, but we don't wait so we can kill $fillosd at the same time
     _objectstore_tool_nowait $dir $osd --op export --pgid 2.0 --file $dir/export.out
-    kill $(cat $dir/osd.$fillosd.pid)
-    sleep 5
+    kill_daemon $dir/osd.$fillosd.pid TERM
     _objectstore_tool_nodown $dir $fillosd --force --op remove --pgid 2.0
     _objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out || return 1
     _objectstore_tool_nodown $dir $fillosd --op import --pgid 1.0 --file $dir/fillexport.out || return 1
@@ -508,12 +505,12 @@ function TEST_backfill_multi_partial() {
         done
     done

-    kill $(cat $dir/osd.$osd.pid)
+    kill_daemon $dir/osd.$osd.pid TERM
     ceph osd out osd.$osd

     activate_osd $dir $fillosd || return 1
     ceph osd in osd.$fillosd
-    sleep 15
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1
@@ -664,7 +661,7 @@ function TEST_ec_backfill_simple() {
     fi

     sleep 5
-    kill $(cat $dir/osd.$fillosd.pid)
+    kill_daemon $dir/osd.$fillosd.pid TERM
     ceph osd out osd.$fillosd
     sleep 2
     ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
@@ -689,7 +686,7 @@ function TEST_ec_backfill_simple() {
         done
     done

-    kill $(cat $dir/osd.$osd.pid)
+    kill_daemon $dir/osd.$osd.pid TERM
     ceph osd out osd.$osd

     activate_osd $dir $fillosd || return 1
@@ -820,7 +817,7 @@ function TEST_ec_backfill_multi() {
         ceph osd pg-upmap $(expr $p + 1).0 ${nonfillosds% *} $fillosd
     done

-    sleep 10
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1
@@ -958,7 +955,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {
     #activate_osd $dir $lastosd || return 1
     #ceph tell osd.0 debug kick_recovery_wq 0

-    sleep 10
+    sleep 30
     ceph pg dump pgs

     wait_for_not_backfilling 240 || return 1
@@ -1033,7 +1030,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {
     fi

     sleep 5
-    kill $(cat $dir/osd.$fillosd.pid)
+    kill_daemon $dir/osd.$fillosd.pid TERM
     ceph osd out osd.$fillosd
     sleep 2
     ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
@@ -1059,7 +1056,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {
     done

     #ceph pg map 2.0 --format=json | jq '.'
-    kill $(cat $dir/osd.$osd.pid)
+    kill_daemon $dir/osd.$osd.pid TERM
     ceph osd out osd.$osd

     _objectstore_tool_nodown $dir $osd --op export --pgid 2.0 --file $dir/export.out
@@ -1067,7 +1064,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {

     activate_osd $dir $fillosd || return 1
     ceph osd in osd.$fillosd
-    sleep 15
+    sleep 30

     wait_for_not_backfilling 240 || return 1
     wait_for_not_activating 60 || return 1

@@ -353,6 +353,7 @@ function TEST_backfill_out2() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2

@@ -410,6 +411,7 @@ function TEST_backfill_sizeup4_allout() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2

@@ -474,6 +476,7 @@ function TEST_backfill_remapped() {
     primary=$(get_primary $poolname obj1)

     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0

     sleep 2
@@ -534,6 +537,7 @@ function TEST_backfill_ec_all_out() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2

@@ -584,6 +588,7 @@ function TEST_backfill_ec_prim_out() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2

@@ -642,6 +647,7 @@ function TEST_backfill_ec_down_all_out() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2
     flush_pg_stats
@@ -726,6 +732,7 @@ function TEST_backfill_ec_down_out() {
     sleep 2
     primary=$(get_primary $poolname obj1)
     ceph osd unset nobackfill
+    ceph tell osd.$primary get_latest_osdmap
     ceph tell osd.$primary debug kick_recovery_wq 0
     sleep 2

@@ -140,13 +140,13 @@ function TEST_bluestore() {

     ceph-bluestore-tool --path $dir/3 fsck || return 1

-    run_osd $dir 0 || return 1
+    activate_osd $dir 0 || return 1
     osd_pid0=$(cat $dir/osd.0.pid)
-    run_osd $dir 1 || return 1
+    activate_osd $dir 1 || return 1
     osd_pid1=$(cat $dir/osd.1.pid)
-    run_osd $dir 2 || return 1
+    activate_osd $dir 2 || return 1
     osd_pid2=$(cat $dir/osd.2.pid)
-    run_osd $dir 3 || return 1
+    activate_osd $dir 3 || return 1
     osd_pid3=$(cat $dir/osd.3.pid)

     wait_for_clean || return 1
@@ -218,13 +218,13 @@ function TEST_bluestore() {

     ceph-bluestore-tool --path $dir/3 fsck || return 1

-    run_osd $dir 0 || return 1
+    activate_osd $dir 0 || return 1
     osd_pid0=$(cat $dir/osd.0.pid)
-    run_osd $dir 1 || return 1
+    activate_osd $dir 1 || return 1
     osd_pid1=$(cat $dir/osd.1.pid)
-    run_osd $dir 2 || return 1
+    activate_osd $dir 2 || return 1
     osd_pid2=$(cat $dir/osd.2.pid)
-    run_osd $dir 3 || return 1
+    activate_osd $dir 3 || return 1
     osd_pid3=$(cat $dir/osd.3.pid)

     # write some objects
@@ -324,13 +324,13 @@ function TEST_bluestore() {

     ceph-bluestore-tool --path $dir/3 fsck || return 1

-    run_osd $dir 0 || return 1
+    activate_osd $dir 0 || return 1
     osd_pid0=$(cat $dir/osd.0.pid)
-    run_osd $dir 1 || return 1
+    activate_osd $dir 1 || return 1
     osd_pid1=$(cat $dir/osd.1.pid)
-    run_osd $dir 2 || return 1
+    activate_osd $dir 2 || return 1
     osd_pid2=$(cat $dir/osd.2.pid)
-    run_osd $dir 3 || return 1
+    activate_osd $dir 3 || return 1
     osd_pid3=$(cat $dir/osd.3.pid)

     # write some objects

@@ -61,7 +61,7 @@ function TEST_filestore_to_bluestore() {
         --op dup || return 1
     CEPH_ARGS=$O

-    run_osd $dir 0 || return 1
+    activate_osd $dir 0 || return 1

     while ! ceph osd stat | grep '3 up' ; do sleep 1 ; done
     ceph osd metadata 0 | grep bluestore || return 1

@@ -472,7 +472,7 @@ function TEST_recovery_multi() {

     kill $(cat $dir/osd.${primary}.pid)
     ceph osd down osd.${primary}
-    run_osd $dir ${otherosd}
+    activate_osd $dir ${otherosd}
     sleep 3

     for i in $(seq $(expr $half + 1) $objects)
@@ -485,7 +485,7 @@ function TEST_recovery_multi() {

     ceph osd unset noout
     ceph osd out osd.$primary osd.$otherosd
-    run_osd $dir ${primary}
+    activate_osd $dir ${primary}
     sleep 3

     ceph osd pool set test size 4

@@ -208,7 +208,7 @@ function TEST_rep_backfill_unfound() {
     inject_eio rep data $poolname $testobj $dir 0 || return 1
     inject_eio rep data $poolname $testobj $dir 1 || return 1

-    run_osd $dir ${last_osd} || return 1
+    activate_osd $dir ${last_osd} || return 1
     ceph osd in ${last_osd} || return 1

     sleep 15
@@ -285,7 +285,7 @@ function TEST_rep_recovery_unfound() {
     inject_eio rep data $poolname $testobj $dir 0 || return 1
     inject_eio rep data $poolname $testobj $dir 1 || return 1

-    run_osd $dir ${last_osd} || return 1
+    activate_osd $dir ${last_osd} || return 1
     ceph osd in ${last_osd} || return 1

     sleep 15

@@ -105,7 +105,7 @@ function TEST_repro_long_log2()
     local PRIMARY=$(ceph pg $PGID query | jq '.info.stats.up_primary')
     kill_daemons $dir TERM osd.$PRIMARY || return 1
     CEPH_ARGS="--osd-max-pg-log-entries=2 --no-mon-config" ceph-objectstore-tool --data-path $dir/$PRIMARY --pgid $PGID --op trim-pg-log || return 1
-    run_osd $dir $PRIMARY || return 1
+    activate_osd $dir $PRIMARY || return 1
     wait_for_clean || return 1
     test_log_size $PGID 2 || return 1
 }

@@ -480,7 +480,7 @@ function TEST_auto_repair_bluestore_failed() {
     grep scrub_finish $dir/osd.${primary}.log
     grep -q "scrub_finish.*still present after re-scrub" $dir/osd.${primary}.log || return 1
     ceph pg dump pgs
-    ceph pg dump pgs | grep -q "^$(pgid).*+failed_repair" || return 1
+    ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1

     # Verify - obj1 should be back
     # Restarted osd get $ceph_osd_args passed
@@ -495,7 +495,7 @@ function TEST_auto_repair_bluestore_failed() {
     sleep 2

     ceph pg dump pgs
-    ceph pg dump pgs | grep -q "^$(pgid).* active+clean " || return 1
+    ceph pg dump pgs | grep -q "^${pgid}.* active+clean " || return 1
     grep scrub_finish $dir/osd.${primary}.log

     # Tear down
@@ -550,7 +550,7 @@ function TEST_auto_repair_bluestore_failed_norecov() {
     flush_pg_stats
     grep -q "scrub_finish.*present with no repair possible" $dir/osd.${primary}.log || return 1
     ceph pg dump pgs
-    ceph pg dump pgs | grep -q "^$(pgid).*+failed_repair" || return 1
+    ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1

     # Tear down
     teardown $dir || return 1
@@ -600,13 +600,14 @@ function TEST_repair_stats() {
         OSD=$(expr $i % 2)
         _objectstore_tool_nodown $dir $OSD obj$i remove || return 1
     done
-    run_osd $dir $primary $ceph_osd_args || return 1
-    run_osd $dir $other $ceph_osd_args || return 1
+    activate_osd $dir $primary $ceph_osd_args || return 1
+    activate_osd $dir $other $ceph_osd_args || return 1
     wait_for_clean || return 1

     repair $pgid
     wait_for_clean || return 1
     ceph pg dump pgs
+    flush_pg_stats

     # This should have caused 1 object to be repaired
     ceph pg $pgid query | jq '.info.stats.stat_sum'
@@ -673,13 +674,14 @@ function TEST_repair_stats_ec() {
         OSD=$(expr $i % 2)
         _objectstore_tool_nodown $dir $OSD obj$i remove || return 1
     done
-    run_osd $dir $primary $ceph_osd_args || return 1
-    run_osd $dir $other $ceph_osd_args || return 1
+    activate_osd $dir $primary $ceph_osd_args || return 1
+    activate_osd $dir $other $ceph_osd_args || return 1
     wait_for_clean || return 1

     repair $pgid
     wait_for_clean || return 1
     ceph pg dump pgs
+    flush_pg_stats

     # This should have caused 1 object to be repaired
     ceph pg $pgid query | jq '.info.stats.stat_sum'

@@ -201,9 +201,11 @@ function TEST_scrub_snaps() {

     for osd in $(seq 0 $(expr $OSDS - 1))
     do
         run_osd $dir $osd || return 1
+        activate_osd $dir $osd || return 1
     done

+    wait_for_clean || return 1

     local pgid="${poolid}.0"
     if ! pg_scrub "$pgid" ; then
         return 1
@@ -811,9 +813,11 @@ function _scrub_snaps_multi() {

     for osd in $(seq 0 $(expr $OSDS - 1))
    do
         run_osd $dir $osd || return 1
+        activate_osd $dir $osd || return 1
     done

+    wait_for_clean || return 1

     local pgid="${poolid}.0"
     if ! pg_scrub "$pgid" ; then
         return 1

@@ -45,7 +45,7 @@ if sys.version_info[0] >= 3:
     def decode(s):
         return s.decode('utf-8')

-    def check_output(*args, **kwargs):
+    def check_output(*args, **kwargs):  # noqa
         return decode(subprocess.check_output(*args, **kwargs))
 else:
     def decode(s):
@@ -336,7 +336,7 @@ def check_entry_transactions(entry, enum):


 def check_transaction_ops(ops, enum, tnum):
-    if len(ops) is 0:
+    if len(ops) == 0:
         logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
     errors = 0
     for onum in range(len(ops)):
@@ -375,7 +375,7 @@ def test_dump_journal(CFSD_PREFIX, osds):
         os.unlink(TMPFILE)

         journal_errors = check_journal(jsondict)
-        if journal_errors is not 0:
+        if journal_errors != 0:
             logging.error(jsondict)
         ERRORS += journal_errors

@@ -519,7 +519,7 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
     for line in output.strip().split('\n'):
         print(line)
         linev = re.split('\s+', line)
-        if linev[0] is '':
+        if linev[0] == '':
             linev.pop(0)
         print('linev %s' % linev)
         weights.append(float(linev[2]))

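The Python cleanups above are not cosmetic: `is` compares object identity, `==` compares values, so `len(ops) is 0` and `linev[0] is ''` only ever worked because CPython happens to cache small integers and some strings (newer interpreters emit a SyntaxWarning for literal comparisons like these). A quick illustration:

    # Equality vs identity: `==` is the correct test for values.
    n = 0
    print(n == 0, n is 0)   # True True, but only via CPython's small-int cache

    # Identity fails as soon as the value is built at runtime:
    s = "".join(["ab", "ab"])
    print(s == "abab", s is "abab")   # True False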
@@ -1,6 +1,7 @@
 overrides:
   ceph:
     log-whitelist:
+      - Replacing daemon mds
       - Scrub error on inode
       - Behind on trimming
       - Metadata damage detected

@@ -1,2 +0,0 @@
-overrides:
-  python: python2

@@ -1,2 +0,0 @@
-overrides:
-  python: python3

@@ -17,3 +17,4 @@ tasks:
   - exec:
       client.0:
         - ceph_test_trim_caps
+        - ceph_test_ino_release_cb

@@ -10,9 +10,11 @@ overrides:
       - \(SLOW_OPS\)
       - overall HEALTH_
       - \(MON_MSGR2_NOT_ENABLED\)
+      - slow request
     conf:
       global:
         bluestore warn on legacy statfs: false
+        mon pg warn min per osd: 0
       mon:
         mon warn on osd down out interval zero: false

@@ -10,9 +10,11 @@ overrides:
       - \(SLOW_OPS\)
       - overall HEALTH_
       - \(MON_MSGR2_NOT_ENABLED\)
+      - slow request
     conf:
       global:
         bluestore warn on legacy statfs: false
+        mon pg warn min per osd: 0
       mon:
         mon warn on osd down out interval zero: false

@@ -2,6 +2,7 @@ overrides:
   ceph:
     conf:
       global:
+        mon pg warn min per osd: 0
         bluestore warn on legacy statfs: false

 tasks:

ceph/qa/suites/fs/verify/centos_latest.yaml (new symbolic link)
@@ -0,0 +1 @@
.qa/distros/supported/centos_latest.yaml

@@ -1,5 +1,5 @@
-# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
-os_type: centos
+# Only works on os_type: centos
+# See http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126

 overrides:
   install:

@@ -2,6 +2,7 @@ overrides:
   ceph:
     log-whitelist:
       - SLOW_OPS
+      - slow request
 tasks:
   - workunit:
       clients:

@@ -2,6 +2,7 @@ overrides:
   ceph:
     log-whitelist:
       - SLOW_OPS
+      - slow request
     conf:
       osd:
         filestore flush min: 0

ceph/qa/suites/multimds/basic/0-supported-random-distro$ (new symbolic link)
@@ -0,0 +1 @@
.qa/distros/supported-random-distro$/

ceph/qa/suites/multimds/thrash/0-supported-random-distro$ (new symbolic link)
@@ -0,0 +1 @@
.qa/distros/supported-random-distro$/

ceph/qa/suites/multimds/verify/centos_latest.yaml (new symbolic link)
@@ -0,0 +1 @@
.qa/distros/supported/centos_latest.yaml

|
@ -9,6 +9,7 @@ overrides:
|
||||
index_pool_pg_size: 64
|
||||
tasks:
|
||||
- cbt:
|
||||
branch: 'nautilus'
|
||||
benchmarks:
|
||||
cosbench:
|
||||
obj_size: [64KB]
|
||||
|
@ -5,6 +5,7 @@ meta:
|
||||
|
||||
tasks:
|
||||
- cbt:
|
||||
branch: 'nautilus'
|
||||
benchmarks:
|
||||
librbdfio:
|
||||
op_size: [4096]
|
||||
|
@ -5,6 +5,7 @@ meta:
|
||||
|
||||
tasks:
|
||||
- cbt:
|
||||
branch: 'nautilus'
|
||||
benchmarks:
|
||||
radosbench:
|
||||
concurrent_ops: 4
|
||||
|
@@ -18,7 +18,8 @@ tasks:
       - influxdb python module not found
       - \(MGR_ZABBIX_
       - foo bar
       - evicting unresponsive client
+      - Failed to open Telegraf
   - cephfs_test_runner:
       modules:
         - tasks.mgr.test_module_selftest

@@ -15,6 +15,7 @@ overrides:
       # slow mons -> slow peering -> PG_AVAILABILITY
       - \(PG_AVAILABILITY\)
       - \(SLOW_OPS\)
+      - slow request
 tasks:
   - install:
   - ceph:

@@ -11,6 +11,7 @@ overrides:
       - \(PG_
       - \(POOL_APP_NOT_ENABLED\)
       - \(SMALLER_PGP_NUM\)
+      - slow request
     conf:
       global:
         debug objecter: 20

@@ -19,5 +19,6 @@ tasks:
       - \(PG_
       - \(SLOW_OPS\)
       - No standby daemons available
+      - slow request
   - mon_clock_skew_check:
       expect-skew: true

@@ -5,4 +5,6 @@ tasks:
       - overall HEALTH_
       - \(MON_DOWN\)
       - \(PG_AVAILABILITY\)
+      - \(SLOW_OPS\)
+      - slow request
   - mon_recovery: