mirror of https://git.proxmox.com/git/ceph.git (synced 2025-04-28 12:54:34 +00:00)

import ceph octopus 15.2.17

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

parent bf2dd02a9c
commit 25a15a5da3
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10.2)
# remove cmake/modules/FindPython* once 3.12 is required

project(ceph
VERSION 15.2.16
VERSION 15.2.17
LANGUAGES CXX C ASM)

foreach(policy
@@ -1,3 +1,9 @@
15.2.16
-------

* A health warning will now be reported if the ``require-osd-release`` flag is not
set to the appropriate release after a cluster upgrade.
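
A minimal sketch of acting on this warning after an upgrade (the command exists in
current Ceph releases; the release name here assumes an Octopus cluster)::

    ceph osd require-osd-release octopus
    ceph health detail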

15.2.14
-------

@@ -1,2 +1,2 @@
plantweb
git+https://github.com/readthedocs/readthedocs-sphinx-search@master
git+https://github.com/readthedocs/readthedocs-sphinx-search@main
@@ -1,4 +1,4 @@
Sphinx == 3.5.4
Sphinx == 4.4.0
git+https://github.com/ceph/sphinx-ditaa.git@py3#egg=sphinx-ditaa
breathe >= 4.20.0
pyyaml >= 5.1.2
@@ -1,7 +1,7 @@
# Contributor: John Coyle <dx9err@gmail.com>
# Maintainer: John Coyle <dx9err@gmail.com>
pkgname=ceph
pkgver=15.2.16
pkgver=15.2.17
pkgrel=0
pkgdesc="Ceph is a distributed object store and file system"
pkgusers="ceph"

@@ -63,7 +63,7 @@ makedepends="
xmlstarlet
yasm
"
source="ceph-15.2.16.tar.bz2"
source="ceph-15.2.17.tar.bz2"
subpackages="
$pkgname-base
$pkgname-common

@@ -116,7 +116,7 @@ _sysconfdir=/etc
_udevrulesdir=/etc/udev/rules.d
_python_sitelib=/usr/lib/python2.7/site-packages

builddir=$srcdir/ceph-15.2.16
builddir=$srcdir/ceph-15.2.17

build() {
export CEPH_BUILD_VIRTUALENV=$builddir
@@ -98,7 +98,7 @@
# main package definition
#################################################################################
Name: ceph
Version: 15.2.16
Version: 15.2.17
Release: 0%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch: 2

@@ -114,7 +114,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-
Group: System/Filesystems
%endif
URL: http://ceph.com/
Source0: %{?_remote_tarball_prefix}ceph-15.2.16.tar.bz2
Source0: %{?_remote_tarball_prefix}ceph-15.2.17.tar.bz2
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch: x86_64 aarch64 ppc64le s390x

@@ -1141,7 +1141,7 @@ This package provides Ceph’s default alerts for Prometheus.
# common
#################################################################################
%prep
%autosetup -p1 -n ceph-15.2.16
%autosetup -p1 -n ceph-15.2.17

%build
# LTO can be enabled as soon as the following GCC bug is fixed:

@@ -1462,7 +1462,7 @@ exit 0
%{_mandir}/man8/cephadm.8*
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
%attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
%config(noreplace) %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys

%files common
%dir %{_docdir}/ceph

@@ -1462,7 +1462,7 @@ exit 0
%{_mandir}/man8/cephadm.8*
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
%attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
%config(noreplace) %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys

%files common
%dir %{_docdir}/ceph
@@ -1,7 +1,13 @@
ceph (15.2.16-1bionic) bionic; urgency=medium
ceph (15.2.17-1bionic) bionic; urgency=medium

-- Jenkins Build Slave User <jenkins-build@braggi02.front.sepia.ceph.com> Tue, 01 Mar 2022 06:57:11 +0000
-- Jenkins Build Slave User <jenkins-build@braggi10.front.sepia.ceph.com> Tue, 09 Aug 2022 17:17:43 +0000

ceph (15.2.17-1) stable; urgency=medium

* New upstream release

-- Ceph Release Team <ceph-maintainers@ceph.io> Tue, 09 Aug 2022 17:06:59 +0000

ceph (15.2.16-1) stable; urgency=medium
@@ -95,7 +95,7 @@ function(do_build_dpdk dpdk_dir)
ExternalProject_Add(dpdk-ext
SOURCE_DIR ${dpdk_source_dir}
CONFIGURE_COMMAND ${make_cmd} config O=${dpdk_dir} T=${target}
BUILD_COMMAND ${make_cmd} O=${dpdk_dir} CC=${CMAKE_C_COMPILER} EXTRA_CFLAGS=-fPIC
BUILD_COMMAND ${make_cmd} O=${dpdk_dir} CC=${CMAKE_C_COMPILER} EXTRA_CFLAGS=-fPIC RTE_DEVEL_BUILD=n
BUILD_IN_SOURCE 1
INSTALL_COMMAND "true")
if(NUMA_FOUND)
@@ -102,7 +102,8 @@ breathe_domain_by_extension = {'py': 'py', 'c': 'c', 'h': 'c', 'cc': 'cxx', 'hpp
breathe_doxygen_config_options = {
'EXPAND_ONLY_PREDEF': 'YES',
'MACRO_EXPANSION': 'YES',
'PREDEFINED': 'CEPH_RADOS_API= '
'PREDEFINED': 'CEPH_RADOS_API= ',
'WARN_IF_UNDOCUMENTED': 'NO',
}

# the docs are rendered with github links pointing to master. the javascript

@@ -113,7 +114,7 @@ edit_on_github_branch = 'master'

# handles edit-on-github and old version warning display
def setup(app):
app.add_javascript('js/ceph.js')
app.add_js_file('js/ceph.js')
if ditaa is None:
# add "ditaa" as an alias of "diagram"
from plantweb.directive import DiagramDirective
@@ -28,94 +28,163 @@ The following chart illustrates basic development workflow:
| pull request | git push \-------------/
\--------------/

Below we present an explanation of this chart. The explanation is written
with the assumption that you, the reader, are a beginning developer who
has an idea for a bugfix, but do not know exactly how to proceed. Watch
the `Getting Started with Ceph Development
<https://www.youtube.com/watch?v=t5UIehZ1oLs>`_ video for
a practical summary of the same.
This page assumes that you are a new contributor with an idea for a bugfix or
an enhancement, but you do not know how to proceed. Watch the `Getting Started
with Ceph Development <https://www.youtube.com/watch?v=t5UIehZ1oLs>`_ video for
a practical summary of this workflow.

Update the tracker
------------------

Before you start, you should know the `Issue tracker`_ number of the bug
you intend to fix. If there is no tracker issue, now is the time to create
one.
Find the `Redmine issue tracker <https://tracker.ceph.com>`_ number of
the bug you intend to fix. If no tracker issue exists, create one. There is
only one case in which you do not have to create a Redmine tracker issue: the
case of minor documentation changes.

The tracker is there to explain the issue (bug) to your fellow Ceph
developers and keep them informed as you make progress toward resolution.
To this end, then, provide a descriptive title as well as sufficient
information and details in the description.
Simple documentation cleanup does not require a corresponding tracker issue.
Major documentation changes do require a tracker issue. Major documentation
changes include adding new documentation chapters or files, and making
substantial changes to the structure or content of the documentation.

If you have sufficient tracker permissions, assign the bug to yourself by
changing the ``Assignee`` field. If your tracker permissions have not yet
been elevated, simply add a comment to the issue with a short message like
"I am working on this issue".
A (Redmine) tracker ticket explains the issue (bug) to other Ceph developers to
keep them informed as the bug nears resolution. Provide a useful, clear title
and include detailed information in the description. When composing the title
of the ticket, ask yourself "If I need to search for this ticket two years from
now, which keywords am I likely to search for?" Then include those keywords in
the title.

Upstream code
-------------
If your tracker permissions are elevated, assign the bug to yourself by setting
the ``Assignee`` field. If your tracker permissions have not been elevated,
just add a comment with a short message that says "I am working on this issue".

This section, and the ones that follow, correspond to the nodes in the
above chart.
Ceph Workflow Overview
----------------------

The upstream code lives in https://github.com/ceph/ceph.git, which is
sometimes referred to as the "upstream repo", or simply "upstream". As the
chart illustrates, we will make a local copy of this code, modify it, test
our modifications, and submit the modifications back to the upstream repo
for review.
Three repositories are involved in the Ceph workflow. They are:

A local copy of the upstream code is made by
1. The upstream repository (ceph/ceph)
2. Your fork of the upstream repository (your_github_id/ceph)
3. Your local working copy of the repository (on your workstation)

1. forking the upstream repo on GitHub, and
2. cloning your fork to make a local working copy
The procedure for making changes to the Ceph repository is as follows:

See the `the GitHub documentation
#. Configure your local environment

#. :ref:`Create a fork<forking>` of the "upstream Ceph"
repository.

#. :ref:`Clone the fork<cloning>` to your local filesystem.

#. Fix the bug

#. :ref:`Synchronize local master with upstream master<synchronizing>`.

#. :ref:`Create a bugfix branch<bugfix_branch>` in your local working copy.

#. :ref:`Make alterations to the local working copy of the repository in your
local filesystem<fixing_bug_locally>`.

#. :ref:`Push the changes in your local working copy to your fork<push_changes>`.

#. Create a Pull Request to push the change upstream

#. Create a Pull Request that asks for your changes to be added into the
"upstream Ceph" repository.

Preparing Your Local Working Copy of the Ceph Repository
--------------------------------------------------------

The procedures in this section, "Preparing Your Local Working Copy of the Ceph
Repository", must be followed only when you are first setting up your local
environment. If this is your first time working with the Ceph project, then
these commands are necessary and are the first commands that you should run.

.. _forking:

Creating a Fork of the Ceph Repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

See the `GitHub documentation
<https://help.github.com/articles/fork-a-repo/#platform-linux>`_ for
detailed instructions on forking. In short, if your GitHub username is
"mygithubaccount", your fork of the upstream repo will show up at
https://github.com/mygithubaccount/ceph. Once you have created your fork,
you clone it by doing:
"mygithubaccount", your fork of the upstream repo will appear at
``https://github.com/mygithubaccount/ceph``.

.. _cloning:

Cloning Your Fork
^^^^^^^^^^^^^^^^^

After you have created your fork, clone it by running the following command:

.. code::

$ git clone https://github.com/mygithubaccount/ceph

While it is possible to clone the upstream repo directly, in this case you
must fork it first. Forking is what enables us to open a `GitHub pull
request`_.
You must fork the Ceph repository before you clone it. If you fail to fork,
you cannot open a `GitHub pull request
<https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request>`_.

For more information on using GitHub, refer to `GitHub Help
<https://help.github.com/>`_.

Local environment
-----------------
Configuring Your Local Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the local environment created in the previous step, you now have a
copy of the ``master`` branch in ``remotes/origin/master``. Since the fork
(https://github.com/mygithubaccount/ceph.git) is frozen in time and the
upstream repo (https://github.com/ceph/ceph.git, typically abbreviated to
``ceph/ceph.git``) is updated frequently by other developers, you will need
to sync your fork periodically. To do this, first add the upstream repo as
a "remote" and fetch it::
The commands in this section configure your local git environment so that it
generates "Signed-off-by:" tags. They also set up your local environment so
that it can stay synchronized with the upstream repository.

$ git remote add ceph https://github.com/ceph/ceph.git
$ git fetch ceph
These commands are necessary only during the initial setup of your local
working copy. Another way to say that is "These commands are necessary
only the first time that you are working with the Ceph repository. They are,
however, unavoidable, and if you fail to run them then you will not be able
to work on the Ceph repository.".

Fetching downloads all objects (commits, branches) that were added since
the last sync. After running these commands, all the branches from
``ceph/ceph.git`` are downloaded to the local git repo as
``remotes/ceph/$BRANCH_NAME`` and can be referenced as
``ceph/$BRANCH_NAME`` in certain git commands.
1. Configure your local git environment with your name and email address.

For example, your local ``master`` branch can be reset to the upstream Ceph
``master`` branch by doing::
.. prompt:: bash $

$ git fetch ceph
$ git checkout master
$ git reset --hard ceph/master
git config user.name "FIRST_NAME LAST_NAME"
git config user.email "MY_NAME@example.com"

Finally, the ``master`` branch of your fork can then be synced to upstream
master by::
2. Add the upstream repo as a "remote" and fetch it:

.. prompt:: bash $

git remote add ceph https://github.com/ceph/ceph.git
git fetch ceph

These commands fetch all the branches and commits from ``ceph/ceph.git`` to
the local git repo as ``remotes/ceph/$BRANCH_NAME`` and can be referenced as
``ceph/$BRANCH_NAME`` in local git commands.
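
As a quick check that the remote is set up as intended (a sketch; the remote name
``ceph`` follows the commands above)::

    git remote -v
    git branch -r --list 'ceph/*'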

Fixing the Bug
--------------

.. _synchronizing:

Synchronizing Local Master with Upstream Master
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In your local git environment, there is a copy of the ``master`` branch in
``remotes/origin/master``. This is called "local master". This copy of the
master branch (https://github.com/your_github_id/ceph.git) is "frozen in time"
at the moment that you cloned it, but the upstream repo
(https://github.com/ceph/ceph.git, typically abbreviated to ``ceph/ceph.git``)
that it was forked from is not frozen in time: the upstream repo is still being
updated by other contributors.

Because upstream master is continually receiving updates from other
contributors, your fork will drift farther and farther from the state of the
upstream repo when you cloned it.

You must keep your fork's master branch synchronized with upstream master in
order to reduce drift between your fork's master branch and the upstream master
branch.

Here are the commands for keeping your fork synchronized with the
upstream repository:

$ git push -u origin master
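
.. note:: The diff context truncates the surrounding command list here. Based on the
   commands shown earlier on this page, the full synchronization sequence is typically
   along these lines (a sketch, not the literal context)::

       git fetch ceph
       git checkout master
       git reset --hard ceph/master
       git push -u origin master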

@@ -124,7 +193,10 @@ Bugfix branch

Next, create a branch for the bugfix:

.. code::
.. _bugfix_branch:

Creating a Bugfix branch
^^^^^^^^^^^^^^^^^^^^^^^^

$ git checkout master
$ git checkout -b fix_1

@@ -137,100 +209,177 @@ for long! You are now ready to modify the code.
Fix bug locally
---------------

At this point, change the status of the tracker issue to "In progress" to
communicate to the other Ceph developers that you have begun working on a
fix. If you don't have permission to change that field, your comment that
you are working on the issue is sufficient.
The first command (git checkout master) makes sure that the bugfix branch
"fix_1" is created from the most recent state of the master branch of the
upstream repository.

Possibly, your fix is very simple and requires only minimal testing.
More likely, it will be an iterative process involving trial and error, not
to mention skill. An explanation of how to fix bugs is beyond the
scope of this document. Instead, we focus on the mechanics of the process
in the context of the Ceph project.
The second command (git checkout -b fix_1) creates a "bugfix branch" called
"fix_1" in your local working copy of the repository. The changes that you make
in order to fix the bug will be committed to this branch.

A detailed discussion of the tools available for validating your bugfixes,
see the chapters on testing.
The third command (git push -u origin fix_1) pushes the bugfix branch from
your local working repository to your fork of the upstream repository.
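
.. note:: Taken together, the three commands discussed above are (a sketch; ``fix_1``
   is the example branch name used throughout this page)::

       git checkout master
       git checkout -b fix_1
       git push -u origin fix_1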

For now, let us just assume that you have finished work on the bugfix and
that you have tested it and believe it works. Commit the changes to your local
branch using the ``--signoff`` option::
.. _fixing_bug_locally:

$ git commit -as
Fixing the bug in the local working copy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

and push the changes to your fork::
#. Updating the tracker

$ git push origin fix_1
In the `Ceph issue tracker <https://tracker.ceph.com>`_, change the status
of the tracker issue to "In progress". This communicates to other Ceph
contributors that you have begun working on a fix, which helps to avoid
duplication of effort. If you don't have permission to change that field,
your comment that you are working on the issue is sufficient.

GitHub pull request
-------------------
#. Fixing the bug itself

This guide cannot tell you how to fix the bug that you have chosen to fix.
This guide assumes that you know what required improvement, and that you
know what to do to provide that improvement.

It might be that your fix is simple and requires only minimal testing. But
that's unlikely. It is more likely that the process of fixing your bug will
be iterative and will involve trial, error, skill, and patience.

For a detailed discussion of the tools available for validating bugfixes,
see the chapters on testing.

Pushing the Fix to Your Fork
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You have finished work on the bugfix. You have tested the bugfix, and you
believe that it works.

#. Commit the changes to your local working copy.

Commit the changes to the `fix_1` branch of your local working copy by using
the ``--signoff`` option (here represented as the `s` portion of the `-as`
flag):

.. prompt:: bash $

git commit -as
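# A sketch of the equivalent long-form invocation: "-a" stages all tracked
# changes and "-s" adds the Signed-off-by: line.
git commit --all --signoff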

.. _push_changes:

#. Push the changes to your fork:

Push the changes from the `fix_1` branch of your local working copy to the
`fix_1` branch of your fork of the upstream repository:

.. prompt:: bash $

git push origin fix_1

.. note::

In the command `git push origin fix_1`, `origin` is the name of your fork
of the upstream Ceph repository, and can be thought of as a nickname for
`git@github.com:username/ceph.git`, where `username` is your GitHub
username.

It is possible that `origin` is not the name of your fork. Discover the
name of your fork by running `git remote -v`, as shown here:

.. code-block:: bash

$ git remote -v
ceph https://github.com/ceph/ceph.git (fetch)
ceph https://github.com/ceph/ceph.git (push)
origin git@github.com:username/ceph.git (fetch)
origin git@github.com:username/ceph.git (push)

The line "origin git@github.com:username/ceph.git (fetch)" and the line
"origin git@github.com:username/ceph.git (push)" provide the information
that "origin" is the name of your fork of the Ceph repository.

The next step is to open a GitHub pull request. The purpose of this step is
to make your bugfix available to the community of Ceph developers. They
will review it and may do additional testing on it.

In short, this is the point where you "go public" with your modifications.
Psychologically, you should be prepared to receive suggestions and
constructive criticism. Don't worry! In our experience, the Ceph project is
a friendly place!
After you have pushed the bugfix to your fork, open a GitHub pull request
(PR). This makes your bugfix visible to the community of Ceph contributors.
They will review it. They may perform additional testing on your bugfix, and
they might request changes to the bugfix.

If you are uncertain how to use pull requests, you may read
`this GitHub pull request tutorial`_.
Be prepared to receive suggestions and constructive criticism in the form of
comments within the PR.

If you don't know how to create and manage pull requests, read `this GitHub
pull request tutorial`_.

.. _`this GitHub pull request tutorial`:
https://help.github.com/articles/using-pull-requests/

For some ideas on what constitutes a "good" pull request, see
To learn what constitutes a "good" pull request, see
the `Git Commit Good Practice`_ article at the `OpenStack Project Wiki`_.

.. _`Git Commit Good Practice`: https://wiki.openstack.org/wiki/GitCommitMessages
.. _`OpenStack Project Wiki`: https://wiki.openstack.org/wiki/Main_Page

Once your pull request (PR) is opened, update the `Issue tracker`_ by
adding a comment to the bug pointing the other developers to your PR. The
update can be as simple as::
See also our own `Submitting Patches
<https://github.com/ceph/ceph/blob/master/SubmittingPatches.rst>`_ document.

After your pull request (PR) has been opened, update the `issue tracker
<https://tracker.ceph.com>`_ by adding a comment directing other contributors
to your PR. The comment can be as simple as this::

*PR*: https://github.com/ceph/ceph/pull/$NUMBER_OF_YOUR_PULL_REQUEST

Automated PR validation
-----------------------

When your PR hits GitHub, the Ceph project's `Continuous Integration (CI)
<https://en.wikipedia.org/wiki/Continuous_integration>`_
infrastructure will test it automatically. At the time of this writing
(March 2016), the automated CI testing included a test to check that the
commits in the PR are properly signed (see `Submitting patches`_) and a
`make check`_ test.
When you create or update your PR, the Ceph project's `Continuous Integration
(CI) <https://en.wikipedia.org/wiki/Continuous_integration>`_ infrastructure
automatically tests it. At the time of this writing (May 2022), the automated
CI testing included many tests. These five are among them:

The latter, `make check`_, builds the PR and runs it through a battery of
tests. These tests run on machines operated by the Ceph Continuous
Integration (CI) team. When the tests complete, the result will be shown
on GitHub in the pull request itself.
#. a test to check that the commits are properly signed (see `Submitting Patches <https://github.com/ceph/ceph/blob/master/SubmittingPatches.rst>`_):
#. a test to check that the documentation builds
#. a test to check that the submodules are unmodified
#. a test to check that the API is in order
#. a :ref:`make check<make-check>` test

You can (and should) also test your modifications before you open a PR.
Refer to the chapters on testing for details.
Additional tests may be run depending on which files your PR modifies.

The :ref:`make check<make-check>` test builds the PR and runs it through a
battery of tests. These tests run on servers that are operated by the Ceph
Continuous Integration (CI) team. When the tests have completed their run, the
result is shown on GitHub in the pull request itself.

Test your modifications before you open a PR. Refer to the chapters
on testing for details.

Notes on PR make check test
^^^^^^^^^^^^^^^^^^^^^^^^^^^

The GitHub `make check`_ test is driven by a Jenkins instance.
The GitHub :ref:`make check<make-check>` test is driven by a Jenkins instance.

Jenkins merges the PR branch into the latest version of the base branch before
starting the build, so you don't have to rebase the PR to pick up any fixes.
Jenkins merges your PR branch into the latest version of the base branch before
it starts any tests. This means that you don't have to rebase the PR in order
to pick up any fixes.

You can trigger the PR tests at any time by adding a comment to the PR - the
comment should contain the string "test this please". Since a human subscribed
to the PR might interpret that as a request for him or her to test the PR,
it's good to write the request as "Jenkins, test this please".
You can trigger PR tests at any time by adding a comment to the PR - the
comment should contain the string "test this please". Since a human who is
subscribed to the PR might interpret that as a request for him or her to test
the PR, you must address Jenkins directly. For example, write "jenkins retest
this please". If you need to run only one of the tests, you can request it with
a command like "jenkins test signed". A list of these requests is automatically
added to the end of each new PR's description, so check there to find the
single test you need.

The `make check`_ log is the place to go if there is a failure and you're not
sure what caused it. To reach it, first click on "details" (next to the `make
check`_ test in the PR) to get into the Jenkins web GUI, and then click on
"Console Output" (on the left).
If there is a build failure and you aren't sure what caused it, check the
:ref:`make check<make-check>` log. To access the make check log, click the
"details" (next to the :ref:`make check<make-check>` test in the PR) link to
enter the Jenkins web GUI. Then click "Console Output" (on the left).

Jenkins is set up to grep the log for strings known to have been associated
with `make check`_ failures in the past. However, there is no guarantee that
the strings are associated with any given `make check`_ failure. You have to
dig into the log to be sure.
Jenkins is configured to search logs for strings that are known to have been
associated with :ref:`make check<make-check>` failures in the past. However,
there is no guarantee that these known strings are associated with any given
:ref:`make check<make-check>` failure. You'll have to read through the log to
determine the cause of your specific failure.

Integration tests AKA ceph-qa-suite
-----------------------------------

@@ -245,16 +394,16 @@ sub-directory`_ and are run via the `teuthology framework`_.
.. _`teuthology framework`: https://github.com/ceph/teuthology

The Ceph community has access to the `Sepia lab
<https://wiki.sepia.ceph.com/doku.php>`_ where `integration tests`_ can be
run on real hardware. Other developers may add tags like "needs-qa" to your
PR. This allows PRs that need testing to be merged into a single branch and
tested all at the same time. Since teuthology suites can take hours (even
days in some cases) to run, this can save a lot of time.
<https://wiki.sepia.ceph.com/doku.php>`_ where :ref:`integration
tests<integration-tests>` can be run on real hardware. Other developers may add
tags like "needs-qa" to your PR. This allows PRs that need testing to be merged
into a single branch and tested all at the same time. Since teuthology suites
can take hours (even days in some cases) to run, this can save a lot of time.

To request access to the Sepia lab, start `here <https://wiki.sepia.ceph.com/doku.php?id=vpnaccess>`_.

Integration testing is discussed in more detail in the `integration testing`_
chapter.
Integration testing is discussed in more detail in the :ref:`integration
tests<integration-tests>` chapter.

Code review
-----------

@@ -262,7 +411,7 @@ Code review
Once your bugfix has been thoroughly tested, or even during this process,
it will be subjected to code review by other developers. This typically
takes the form of correspondence in the PR itself, but can be supplemented
by discussions on `IRC`_ and the `Mailing list`_.
by discussions on :ref:`IRC<irc>` and the :ref:`mailing-list-subscribe`.

Amending your PR
----------------

@@ -290,17 +439,9 @@ Merge
The bugfixing process culminates when one of the project leads decides to
merge your PR.

When this happens, it is a signal for you (or the lead who merged the PR)
to change the `Issue tracker`_ status to "Resolved". Some issues may be
flagged for backporting, in which case the status should be changed to
"Pending Backport" (see the `Backporting`_ chapter for details).
When this happens, it is a signal for you (or the lead who merged the PR) to
change the `Ceph issue tracker <https://tracker.ceph.com>`_ status to
"Resolved". Some issues may be flagged for backporting, in which case the
status should be changed to "Pending Backport" (see the
:ref:`Backporting<backporting>` chapter for details).

.. _make check:
.. _Backporting: ../essentials/#backporting
.. _IRC: ../essentials/#irc
.. _Issue Tracker: ../issue-tracker
.. _Integration Tests: ../tests-integration-tests
.. _Integration Testing: ../tests-integration-tests
.. _Mailing List: ../essentials/#mailing-list
.. _Submitting Patches: ../essentials/#submitting-patches
|
@ -89,6 +89,8 @@ click on `New issue`_.
|
||||
.. _`jump to the Ceph project`: http://tracker.ceph.com/projects/ceph
|
||||
.. _`New issue`: http://tracker.ceph.com/projects/ceph/issues/new
|
||||
|
||||
.. _mailing-list-subscribe:
|
||||
|
||||
Mailing list
|
||||
------------
|
||||
|
||||
@ -129,6 +131,7 @@ There are also `other Ceph-related mailing lists`_.
|
||||
|
||||
.. _`other Ceph-related mailing lists`: https://ceph.com/irc/
|
||||
|
||||
.. _irc:
|
||||
|
||||
IRC
|
||||
---
|
||||
@ -225,6 +228,8 @@ Kubernetes/Rook development cluster
|
||||
|
||||
See :ref:`kubernetes-dev`
|
||||
|
||||
.. _backporting:
|
||||
|
||||
Backporting
|
||||
-----------
|
||||
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. _issue tracker:
|
||||
|
||||
Issue Tracker
|
||||
=============
|
||||
|
||||
|
@ -1,12 +1,14 @@
|
||||
.. _integration-tests:
|
||||
|
||||
Testing - Integration Tests
|
||||
===========================
|
||||
|
||||
Ceph has two types of tests: `make check`_ tests and integration tests.
|
||||
When a test requires multiple machines, root access or lasts for a
|
||||
longer time (for example, to simulate a realistic Ceph deployment), it
|
||||
is deemed to be an integration test. Integration tests are organized into
|
||||
"suites", which are defined in the `ceph/qa sub-directory`_ and run with
|
||||
the ``teuthology-suite`` command.
|
||||
Ceph has two types of tests: :ref:`make check<make-check>` tests and
|
||||
integration tests. When a test requires multiple machines, root access or
|
||||
lasts for a longer time (for example, to simulate a realistic Ceph deployment),
|
||||
it is deemed to be an integration test. Integration tests are organized into
|
||||
"suites", which are defined in the `ceph/qa sub-directory`_ and run with the
|
||||
``teuthology-suite`` command.
|
||||
|
||||
The ``teuthology-suite`` command is part of the `teuthology framework`_.
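
As a hedged sketch of what scheduling a suite looks like (flag names as documented by
the teuthology project; running it requires access to the Sepia lab and a configured
teuthology environment)::

    teuthology-suite --dry-run --suite smoke --ceph master --machine-type smithi
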
In the sections that follow we attempt to provide a detailed introduction

@@ -491,7 +493,6 @@ test will be first.

.. _ceph/qa sub-directory: https://github.com/ceph/ceph/tree/master/qa
.. _Integration testing: testing-integration-tests
.. _make check:
.. _Sepia Lab: https://wiki.sepia.ceph.com/doku.php
.. _teuthology repository: https://github.com/ceph/teuthology
.. _teuthology framework: https://github.com/ceph/teuthology

@@ -1,18 +1,20 @@
Testing - unit tests
====================

Ceph has two types of tests: unit tests (also called `make check`_ tests) and
integration tests. Strictly speaking, the `make check`_ tests are not "unit
tests", but rather tests that can be run easily on a single build machine
after compiling Ceph from source, whereas integration tests require packages
and multi-machine clusters to run.
Ceph has two types of tests: unit tests (also called :ref:`make
check<make-check>` tests) and integration tests. Strictly speaking, the
:ref:`make check<make-check>` tests are not "unit tests", but rather tests that
can be run easily on a single build machine after compiling Ceph from source,
whereas integration tests require packages and multi-machine clusters to run.

.. _make-check:

What does "make check" mean?
----------------------------

After compiling Ceph, the code can be run through a battery of tests covering
various aspects of Ceph. For historical reasons, this battery of tests is
often referred to as `make check`_ even though the actual command used to run
often referred to as :ref:`make check<make-check>` even though the actual command used to run
the tests is now ``ctest``. For inclusion in this battery of tests, a test
must:

@@ -55,8 +57,8 @@ Unit testing of CLI tools
Some of the CLI tools are tested using special files ending with the extension
``.t`` and stored under ``./src/test/cli``. These tests are run using a tool
called `cram`_ via a shell script ``./src/test/run-cli-tests``. `cram`_ tests
that are not suitable for `make check`_ may also be run by teuthology using
the `cram task`_.
that are not suitable for :ref:`make check<make-check>` may also be run by
teuthology using the `cram task`_.

.. _`cram`: https://bitheap.org/cram/
.. _`cram task`: https://github.com/ceph/ceph/blob/master/qa/tasks/cram.py
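
The two local test entry points named above (``ctest`` for the make check battery and
the ``run-cli-tests`` script for the cram-based CLI tests) can be sketched as follows,
assuming a completed build in ``./build``::

    cd build
    ctest -j$(nproc) --output-on-failure    # the "make check" battery
    cd ..
    ./src/test/run-cli-tests                # the cram-based CLI tests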

@@ -120,5 +122,4 @@ Unit test caveats
linked against something else. This enables tools like valgrind to be used
in the tests.

.. _make check:
.. _teuthology framework: https://github.com/ceph/teuthology

@@ -871,6 +871,15 @@ Per mapping (block device) `rbd device map` options:
mode (since 5.11). If the daemon denies 'secure' mode in favor of 'crc'
mode, agree to 'crc' mode.

* rxbounce - Use a bounce buffer when receiving data (since 5.17). The default
behaviour is to read directly into the destination buffer. A bounce buffer
is needed if the destination buffer isn't guaranteed to be stable (i.e. remain
unchanged while it is being read to). In particular this is the case for
Windows where a system-wide "dummy" (throwaway) page may be mapped into the
destination buffer in order to generate a single large I/O. Otherwise,
"libceph: ... bad crc/signature" or "libceph: ... integrity error, bad crc"
errors and associated performance degradation are expected.

* udev - Wait for udev device manager to finish executing all matching
"add" rules and release the device before exiting (default). This option
is not passed to the kernel.
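
A sketch of enabling the ``rxbounce`` option for a single mapping (the pool and image
names here are placeholders)::

    rbd device map -o rxbounce mypool/myimage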

@@ -168,11 +168,11 @@ drive statistics, special series are output like this:

::

ceph_disk_occupation{ceph_daemon="osd.0",device="sdd", exported_instance="myhost"}
ceph_disk_occupation_human{ceph_daemon="osd.0", device="sdd", exported_instance="myhost"}

To use this to get disk statistics by OSD ID, use either the ``and`` operator or
the ``*`` operator in your prometheus query. All metadata metrics (like ``
ceph_disk_occupation`` have the value 1 so they act neutral with ``*``. Using ``*``
ceph_disk_occupation_human`` have the value 1 so they act neutral with ``*``. Using ``*``
allows to use ``group_left`` and ``group_right`` grouping modifiers, so that
the resulting metric has additional labels from one side of the query.

@@ -185,13 +185,24 @@ The goal is to run a query like

::

rate(node_disk_bytes_written[30s]) and on (device,instance) ceph_disk_occupation{ceph_daemon="osd.0"}
rate(node_disk_bytes_written[30s]) and
on (device,instance) ceph_disk_occupation_human{ceph_daemon="osd.0"}

Out of the box the above query will not return any metrics since the ``instance`` labels of
both metrics don't match. The ``instance`` label of ``ceph_disk_occupation``
both metrics don't match. The ``instance`` label of ``ceph_disk_occupation_human``
will be the currently active MGR node.

The following two section outline two approaches to remedy this.
The following two section outline two approaches to remedy this.

.. note::

If you need to group on the `ceph_daemon` label instead of `device` and
`instance` labels, using `ceph_disk_occupation_human` may not work reliably.
It is advised that you use `ceph_disk_occupation` instead.

The difference is that `ceph_disk_occupation_human` may group several OSDs
into the value of a single `ceph_daemon` label in cases where multiple OSDs
share a disk.

Use label_replace
=================

@@ -204,7 +215,13 @@ To correlate an OSD and its disks write rate, the following query can be used:

::

label_replace(rate(node_disk_bytes_written[30s]), "exported_instance", "$1", "instance", "(.*):.*") and on (device,exported_instance) ceph_disk_occupation{ceph_daemon="osd.0"}
label_replace(
rate(node_disk_bytes_written[30s]),
"exported_instance",
"$1",
"instance",
"(.*):.*"
) and on (device, exported_instance) ceph_disk_occupation_human{ceph_daemon="osd.0"}

Configuring Prometheus server
=============================

@@ -40,6 +40,8 @@ The following table describes the support status for current Amazon S3 functiona
+---------------------------------+-----------------+----------------------------------------+
| **Bucket Lifecycle** | Supported | |
+---------------------------------+-----------------+----------------------------------------+
| **Bucket Replication** | Partial | Only permitted across zones |
+---------------------------------+-----------------+----------------------------------------+
| **Policy (Buckets, Objects)** | Supported | ACLs & bucket policies are supported |
+---------------------------------+-----------------+----------------------------------------+
| **Bucket Website** | Supported | |
+---------------------------------+-----------------+----------------------------------------+
|
||||
|
||||
$ rbd --cluster site-a mirror image snapshot image-pool/image-1
|
||||
|
||||
By default only ``3`` mirror-snapshots will be created per-image. The most
|
||||
By default up to ``5`` mirror-snapshots will be created per-image. The most
|
||||
recent mirror-snapshot is automatically pruned if the limit is reached.
|
||||
The limit can be overridden via the ``rbd_mirroring_max_mirroring_snapshots``
|
||||
configuration option if required. Additionally, mirror-snapshots are
|
||||
|

@@ -63,29 +63,31 @@ Memory
======

Bluestore uses its own memory to cache data rather than relying on the
operating system page cache. In bluestore you can adjust the amount of memory
the OSD attempts to consume with the ``osd_memory_target`` configuration
option.
operating system's page cache. In Bluestore you can adjust the amount of memory
that the OSD attempts to consume by changing the `osd_memory_target`
configuration option.

- Setting the osd_memory_target below 2GB is typically not recommended (it may
fail to keep the memory that low and may also cause extremely slow performance.
- Setting the `osd_memory_target` below 2GB is typically not
recommended (Ceph may fail to keep the memory consumption under 2GB and
this may cause extremely slow performance).

- Setting the memory target between 2GB and 4GB typically works but may result
in degraded performance as metadata may be read from disk during IO unless the
active data set is relatively small.

- 4GB is the current default osd_memory_target size and was set that way to try
and balance memory requirements and OSD performance for typical use cases.
- 4GB is the current default `osd_memory_target` size. This default
was chosen for typical use cases, and is intended to balance memory
requirements and OSD performance.

- Setting the osd_memory_target higher than 4GB may improve performance when
there are many (small) objects or large (256GB/OSD or more) data sets being
processed.
- Setting the `osd_memory_target` higher than 4GB can improve
performance when there are many (small) objects or when large (256GB/OSD
or more) data sets are processed.
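
A minimal sketch of raising the target at runtime through the centralized configuration
(the 6 GiB value is only an example)::

    ceph config set osd osd_memory_target 6442450944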

.. important:: The OSD memory autotuning is "best effort". While the OSD may
unmap memory to allow the kernel to reclaim it, there is no guarantee that
the kernel will actually reclaim freed memory within any specific time
frame. This is especially true in older versions of Ceph where transparent
huge pages can prevent the kernel from reclaiming memory freed from
the kernel will actually reclaim freed memory within a specific time
frame. This applies especially in older versions of Ceph, where transparent
huge pages can prevent the kernel from reclaiming memory that was freed from
fragmented huge pages. Modern versions of Ceph disable transparent huge
pages at the application level to avoid this, though that still does not
guarantee that the kernel will immediately reclaim unmapped memory. The OSD

@@ -95,9 +97,10 @@ option.
kernel. That value may be more or less than needed depending on the exact
configuration of the system.

When using the legacy FileStore backend, the page cache is used for caching
data, so no tuning is normally needed, and the OSD memory consumption is
generally related to the number of PGs per daemon in the system.
When using the legacy FileStore back end, the page cache is used for caching
data, so no tuning is normally needed. When using the legacy FileStore backend,
the OSD memory consumption is related to the number of PGs per daemon in the
system.

Data Storage

@@ -38,8 +38,8 @@ required when running Ceph File System clients.
``ceph-osd``) stores data, handles data replication, recovery,
rebalancing, and provides some monitoring information to Ceph
Monitors and Managers by checking other Ceph OSD Daemons for a
heartbeat. At least 3 Ceph OSDs are normally required for redundancy
and high availability.
heartbeat. At least three Ceph OSDs are normally required for
redundancy and high availability.

- **MDSs**: A :term:`Ceph Metadata Server` (MDS, ``ceph-mds``) stores
metadata on behalf of the :term:`Ceph File System` (i.e., Ceph Block

@@ -798,7 +798,7 @@
"steppedLine": false,
"targets": [
{
"expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )",
"expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{device}}({{ceph_daemon}}) writes",

@@ -807,7 +807,7 @@
"textEditor": true
},
{
"expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )",
"expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,

@@ -899,14 +899,14 @@
"steppedLine": false,
"targets": [
{
"expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{device}}({{ceph_daemon}}) write",
"refId": "B"
},
{
"expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{device}}({{ceph_daemon}}) read",

@@ -992,7 +992,7 @@
"steppedLine": false,
"targets": [
{
"expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"format": "time_series",
"hide": false,
"intervalFactor": 1,

@@ -1083,7 +1083,7 @@
"steppedLine": false,
"targets": [
{
"expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"format": "time_series",
"hide": false,
"intervalFactor": 1,

@@ -431,7 +431,7 @@
"tableColumn": "",
"targets": [
{
"expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)",
"expr": "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)",
"format": "time_series",
"instant": true,
"intervalFactor": 1,

@@ -390,14 +390,14 @@
"steppedLine": false,
"targets": [
{
"expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
"expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{instance}}/{{device}} Reads",
"refId": "A"
},
{
"expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
"expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{instance}}/{{device}} Writes",

@@ -486,14 +486,14 @@
"steppedLine": false,
"targets": [
{
"expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
"expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}} on {{instance}} Writes",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}} on {{instance}} Reads",
|
||||
@ -582,14 +582,14 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{instance}} {{device}} Reads",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{instance}} {{device}} Writes",
|
||||
@ -673,7 +673,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{device}} on {{instance}}",
|
||||
|
@ -1 +0,0 @@
|
||||
master
|
64
ceph/qa/rbd/krbd_default_map_options.t
Normal file
@ -0,0 +1,64 @@
|
||||
Set up
|
||||
|
||||
$ ceph osd pool create rbda
|
||||
pool 'rbda' created
|
||||
$ rbd pool init rbda
|
||||
$ rbd create rbda/image1 --size 1000
|
||||
|
||||
Test at map options level
|
||||
|
||||
$ OPTIONS="alloc_size=65536,lock_on_read"
|
||||
$ EXPECTED="${OPTIONS}"
|
||||
$ DEV=$(sudo rbd map rbda/image1 --options ${OPTIONS})
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
Test at global level
|
||||
|
||||
$ OPTIONS="alloc_size=4096,crc"
|
||||
$ EXPECTED="${OPTIONS}"
|
||||
$ rbd config global set global rbd_default_map_options ${OPTIONS}
|
||||
$ DEV=$(sudo rbd map rbda/image1)
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
$ OPTIONS="alloc_size=65536,lock_on_read"
|
||||
$ EXPECTED="alloc_size=65536,crc,lock_on_read"
|
||||
$ DEV=$(sudo rbd map rbda/image1 --options ${OPTIONS})
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
Test at pool level
|
||||
|
||||
$ OPTIONS="alloc_size=8192,share"
|
||||
$ EXPECTED="${OPTIONS}"
|
||||
$ rbd config pool set rbda rbd_default_map_options ${OPTIONS}
|
||||
$ DEV=$(sudo rbd map rbda/image1)
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
$ OPTIONS="lock_on_read,alloc_size=65536"
|
||||
$ EXPECTED="alloc_size=65536,lock_on_read,share"
|
||||
$ DEV=$(sudo rbd map rbda/image1 --options ${OPTIONS})
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
Test at image level
|
||||
|
||||
$ OPTIONS="alloc_size=16384,tcp_nodelay"
|
||||
$ EXPECTED="${OPTIONS}"
|
||||
$ rbd config image set rbda/image1 rbd_default_map_options ${OPTIONS}
|
||||
$ DEV=$(sudo rbd map rbda/image1)
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
$ OPTIONS="lock_on_read,alloc_size=65536"
|
||||
$ EXPECTED="alloc_size=65536,lock_on_read,tcp_nodelay"
|
||||
$ DEV=$(sudo rbd map rbda/image1 --options ${OPTIONS})
|
||||
$ sudo grep -q ${EXPECTED} /sys/bus/rbd/devices/${DEV#/dev/rbd}/config_info
|
||||
$ sudo rbd unmap rbda/image1
|
||||
|
||||
Teardown
|
||||
|
||||
$ ceph osd pool rm rbda rbda --yes-i-really-really-mean-it
|
||||
pool 'rbda' removed
|
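The cram test above exercises the rbd_default_map_options plumbing: the kernel map options come from the most specific config level that sets them (image, then pool, then global), and any --options given on the rbd map command line are merged on top, with the keys appearing in sorted order in the EXPECTED strings. A toy model of that merge, written only to reproduce the EXPECTED values used above (the precedence and option names are taken from the test, not from the krbd source):

    # Toy model: option values are strings, bare flags like "crc" have an empty value.
    def merge_map_options(default_opts, cli_opts):
        def parse(s):
            opts = {}
            for item in filter(None, s.split(",")):
                key, _, value = item.partition("=")
                opts[key] = value
            return opts
        merged = {**parse(default_opts), **parse(cli_opts)}
        return ",".join(k if not v else "%s=%s" % (k, v) for k, v in sorted(merged.items()))

    # global level alloc_size=4096,crc plus CLI alloc_size=65536,lock_on_read
    assert merge_map_options("alloc_size=4096,crc",
                             "alloc_size=65536,lock_on_read") == "alloc_size=65536,crc,lock_on_read"
    # pool level alloc_size=8192,share plus CLI lock_on_read,alloc_size=65536
    assert merge_map_options("alloc_size=8192,share",
                             "lock_on_read,alloc_size=65536") == "alloc_size=65536,lock_on_read,share"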
5
ceph/qa/suites/krbd/basic/ms_mode/crc$/crc-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce
|
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce
|
5
ceph/qa/suites/krbd/fsx/ms_mode$/crc-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce,read_from_replica=balance
|
5
ceph/qa/suites/krbd/fsx/ms_mode$/legacy-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce,read_from_replica=balance
|
@ -1,5 +0,0 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=prefer-crc,read_from_replica=balance
|
0
ceph/qa/suites/krbd/ms_modeless/%
Normal file
1
ceph/qa/suites/krbd/ms_modeless/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
1
ceph/qa/suites/krbd/ms_modeless/bluestore-bitmap.yaml
Symbolic link
@ -0,0 +1 @@
|
||||
.qa/objectstore/bluestore-bitmap.yaml
|
1
ceph/qa/suites/krbd/ms_modeless/ceph/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
3
ceph/qa/suites/krbd/ms_modeless/ceph/ceph.yaml
Normal file
@ -0,0 +1,3 @@
|
||||
tasks:
|
||||
- install:
|
||||
- ceph:
|
1
ceph/qa/suites/krbd/ms_modeless/clusters/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
1
ceph/qa/suites/krbd/ms_modeless/clusters/fixed-3.yaml
Symbolic link
@ -0,0 +1 @@
|
||||
.qa/clusters/fixed-3.yaml
|
5
ceph/qa/suites/krbd/ms_modeless/conf.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
global:
|
||||
ms die on skipped message: false
|
1
ceph/qa/suites/krbd/ms_modeless/tasks/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,5 @@
|
||||
tasks:
|
||||
- cram:
|
||||
clients:
|
||||
client.0:
|
||||
- qa/rbd/krbd_default_map_options.t
|
5
ceph/qa/suites/krbd/ms_modeless/tasks/krbd_rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
tasks:
|
||||
- workunit:
|
||||
clients:
|
||||
all:
|
||||
- rbd/krbd_rxbounce.sh
|
1
ceph/qa/suites/krbd/rbd-nomount/ms_mode/crc$/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce
|
1
ceph/qa/suites/krbd/rbd-nomount/ms_mode/legacy$/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce
|
1
ceph/qa/suites/krbd/rbd/ms_mode/crc$/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
5
ceph/qa/suites/krbd/rbd/ms_mode/crc$/crc-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce
|
1
ceph/qa/suites/krbd/rbd/ms_mode/legacy$/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce
|
5
ceph/qa/suites/krbd/singleton/ms_mode$/crc-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce
|
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce
|
@ -1,5 +0,0 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=prefer-crc
|
5
ceph/qa/suites/krbd/thrash/ms_mode$/crc-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=crc,rxbounce
|
5
ceph/qa/suites/krbd/thrash/ms_mode$/legacy-rxbounce.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=legacy,rxbounce
|
@ -1,5 +0,0 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default map options: ms_mode=prefer-crc
|
@ -11,6 +11,7 @@ overrides:
|
||||
osd map cache size: 1
|
||||
osd scrub min interval: 60
|
||||
osd scrub max interval: 120
|
||||
osd max backfills: 6
|
||||
tasks:
|
||||
- thrashosds:
|
||||
timeout: 1800
|
||||
|
@ -7,6 +7,7 @@ overrides:
|
||||
osd:
|
||||
osd scrub min interval: 60
|
||||
osd scrub max interval: 120
|
||||
osd max backfills: 6
|
||||
tasks:
|
||||
- thrashosds:
|
||||
timeout: 1200
|
||||
|
@ -3,6 +3,11 @@ roles:
|
||||
- [mon.a, mgr.x, osd.0, osd.1, client.0]
|
||||
tasks:
|
||||
- install:
|
||||
extra_system_packages:
|
||||
rpm:
|
||||
- qemu-kvm-block-rbd
|
||||
deb:
|
||||
- qemu-block-extra
|
||||
- ceph:
|
||||
fs: xfs
|
||||
conf:
|
||||
|
@ -3,6 +3,11 @@ roles:
|
||||
- [mon.a, mgr.x, osd.0, osd.1, client.0]
|
||||
tasks:
|
||||
- install:
|
||||
extra_system_packages:
|
||||
rpm:
|
||||
- qemu-kvm-block-rbd
|
||||
deb:
|
||||
- qemu-block-extra
|
||||
- ceph:
|
||||
fs: xfs
|
||||
conf:
|
||||
|
@ -3,6 +3,11 @@ roles:
|
||||
- [mon.a, mgr.x, osd.0, osd.1, client.0]
|
||||
tasks:
|
||||
- install:
|
||||
extra_system_packages:
|
||||
rpm:
|
||||
- qemu-kvm-block-rbd
|
||||
deb:
|
||||
- qemu-block-extra
|
||||
- ceph:
|
||||
fs: xfs
|
||||
conf:
|
||||
|
@ -3,6 +3,11 @@ roles:
|
||||
- [mon.a, mgr.x, osd.0, osd.1, client.0]
|
||||
tasks:
|
||||
- install:
|
||||
extra_system_packages:
|
||||
rpm:
|
||||
- qemu-kvm-block-rbd
|
||||
deb:
|
||||
- qemu-block-extra
|
||||
- ceph:
|
||||
fs: xfs
|
||||
conf:
|
||||
|
@ -48,6 +48,7 @@ tasks:
|
||||
- .*test_container_staticweb.StaticWebTest.test_web_listing_css
|
||||
- .*test_container_synchronization.*
|
||||
- .*test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds
|
||||
- .*test_object_services.ObjectTest.test_create_object_with_transfer_encoding
|
||||
|
||||
overrides:
|
||||
ceph:
|
||||
|
@ -1 +1 @@
|
||||
../.qa/
|
||||
../../.qa
|
@ -1,8 +1,8 @@
|
||||
meta:
|
||||
- desc: |
|
||||
Insatll and run ceph on one node,
|
||||
Install and run ceph on one node,
|
||||
with a separate client 1.
|
||||
Upgrade client 1 to octopus
|
||||
Upgrade client 1 to pacific
|
||||
Run tests against old cluster
|
||||
roles:
|
||||
- - mon.a
|
||||
@ -16,6 +16,4 @@ roles:
|
||||
- - client.1
|
||||
overrides:
|
||||
ceph:
|
||||
#log-ignorelist:
|
||||
#- failed to encode map
|
||||
fs: xfs
|
||||
|
@ -6,6 +6,6 @@ tasks:
|
||||
- install.upgrade:
|
||||
exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1', 'python-ceph']
|
||||
client.1:
|
||||
- print: "**** done install.upgrade to -x on client.0"
|
||||
- print: "**** done install.upgrade to -x on client.1"
|
||||
- ceph:
|
||||
- print: "**** done ceph task"
|
@ -1,6 +0,0 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default features: 61
|
||||
|
@ -1,6 +0,0 @@
|
||||
overrides:
|
||||
ceph:
|
||||
conf:
|
||||
client:
|
||||
rbd default features: 1
|
||||
|
@ -0,0 +1 @@
|
||||
../.qa/
|
1
ceph/qa/suites/upgrade-clients/client-upgrade-octopus-quincy/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../../.qa
|
@ -0,0 +1 @@
|
||||
../../.qa
|
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,4 @@
|
||||
openstack:
|
||||
- volumes: # attached to each instance
|
||||
count: 4
|
||||
size: 30 # GB
|
@ -0,0 +1,19 @@
|
||||
meta:
|
||||
- desc: |
|
||||
Install and run ceph on one node,
|
||||
with a separate client 1.
|
||||
Upgrade client 1 to quincy
|
||||
Run tests against old cluster
|
||||
roles:
|
||||
- - mon.a
|
||||
- mon.b
|
||||
- mon.c
|
||||
- osd.0
|
||||
- osd.1
|
||||
- osd.2
|
||||
- client.0
|
||||
- mgr.x
|
||||
- - client.1
|
||||
overrides:
|
||||
ceph:
|
||||
fs: xfs
|
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,11 @@
|
||||
tasks:
|
||||
- install:
|
||||
branch: octopus
|
||||
exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados']
|
||||
- print: "**** done install octopus"
|
||||
- install.upgrade:
|
||||
exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1', 'python-ceph']
|
||||
client.1:
|
||||
- print: "**** done install.upgrade to -x on client.1"
|
||||
- ceph:
|
||||
- print: "**** done ceph task"
|
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1,34 @@
|
||||
tasks:
|
||||
- parallel:
|
||||
- workunit:
|
||||
branch: octopus
|
||||
clients:
|
||||
client.0:
|
||||
- rbd/notify_master.sh
|
||||
env:
|
||||
RBD_FEATURES: "61"
|
||||
- workunit:
|
||||
branch: quincy
|
||||
clients:
|
||||
client.1:
|
||||
- rbd/notify_slave.sh
|
||||
env:
|
||||
RBD_FEATURES: "61"
|
||||
RBD_DISABLE_UPDATE_FEATURES: "1"
|
||||
- print: "**** done rbd: old librbd -> new librbd"
|
||||
- parallel:
|
||||
- workunit:
|
||||
branch: octopus
|
||||
clients:
|
||||
client.0:
|
||||
- rbd/notify_slave.sh
|
||||
env:
|
||||
RBD_FEATURES: "61"
|
||||
- workunit:
|
||||
branch: quincy
|
||||
clients:
|
||||
client.1:
|
||||
- rbd/notify_master.sh
|
||||
env:
|
||||
RBD_FEATURES: "61"
|
||||
- print: "**** done rbd: new librbd -> old librbd"
|
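Both workunits in this parallel task pin RBD_FEATURES to "61", which is the default librbd feature bitmask (layering, exclusive-lock, object-map, fast-diff, deep-flatten). A quick check of that arithmetic, using the bit values as commonly documented for librbd:

    # RBD feature bits as commonly documented for librbd; 61 is their sum, i.e. the
    # default feature set used by these notify_master/notify_slave runs.
    RBD_FEATURES = {
        "layering": 1,
        "exclusive-lock": 4,
        "object-map": 8,
        "fast-diff": 16,
        "deep-flatten": 32,
    }
    assert sum(RBD_FEATURES.values()) == 61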
@ -0,0 +1 @@
|
||||
../.qa/
|
@ -0,0 +1 @@
|
||||
../../../../../../distros/all/ubuntu_20.04.yaml
|
@ -43,11 +43,13 @@ upgrade-sequence:
|
||||
duration: 60
|
||||
- ceph.restart:
|
||||
daemons: [osd.8, osd.9, osd.10, osd.11]
|
||||
wait-for-healthy: true
|
||||
wait-for-healthy: false
|
||||
wait-for-osds-up: true
|
||||
- sleep:
|
||||
duration: 60
|
||||
- ceph.restart:
|
||||
daemons: [rgw.*]
|
||||
wait-for-healthy: true
|
||||
wait-for-healthy: false
|
||||
wait-for-osds-up: true
|
||||
- sleep:
|
||||
duration: 60
|
||||
|
@ -17,6 +17,7 @@ import time
|
||||
import gevent
|
||||
import re
|
||||
import socket
|
||||
import yaml
|
||||
|
||||
from paramiko import SSHException
|
||||
from tasks.ceph_manager import CephManager, write_conf
|
||||
@ -72,11 +73,26 @@ def generate_caps(type_):
|
||||
yield capability
|
||||
|
||||
|
||||
def update_archive_setting(ctx, key, value):
|
||||
with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
|
||||
info_yaml = yaml.safe_load(info_file)
|
||||
info_file.seek(0)
|
||||
if 'archive' in info_yaml:
|
||||
info_yaml['archive'][key] = value
|
||||
else:
|
||||
info_yaml['archive'] = {key: value}
|
||||
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def ceph_crash(ctx, config):
|
||||
"""
|
||||
Gather crash dumps from /var/lib/crash
|
||||
Gather crash dumps from /var/lib/ceph/crash
|
||||
"""
|
||||
|
||||
# Add crash directory to job's archive
|
||||
update_archive_setting(ctx, 'crash', '/var/lib/ceph/crash')
|
||||
|
||||
try:
|
||||
yield
|
||||
|
||||
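The new update_archive_setting() helper does a read-modify-write on the teuthology job's info.yaml so that archive locations (crash dumps here, logs in the next hunk) are recorded for later collection. A minimal standalone sketch of the same pattern, assuming an existing info.yaml in the current directory rather than teuthology's ctx.archive:

    # Minimal sketch under the assumptions above; not the teuthology implementation.
    import yaml

    def update_archive_setting(path, key, value):
        with open(path, "r+") as info_file:
            info = yaml.safe_load(info_file) or {}
            info.setdefault("archive", {})[key] = value
            info_file.seek(0)
            info_file.truncate()
            yaml.safe_dump(info, info_file, default_flow_style=False)

    update_archive_setting("info.yaml", "crash", "/var/lib/ceph/crash")
    update_archive_setting("info.yaml", "log", "/var/log/ceph")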
@ -146,6 +162,9 @@ def ceph_log(ctx, config):
|
||||
)
|
||||
)
|
||||
|
||||
# Add logs directory to job's info log file
|
||||
update_archive_setting(ctx, 'log', '/var/log/ceph')
|
||||
|
||||
class Rotater(object):
|
||||
stop_event = gevent.event.Event()
|
||||
|
||||
@ -1523,7 +1542,7 @@ def restart(ctx, config):
|
||||
ctx.managers[cluster].mark_down_osd(id_)
|
||||
ctx.daemons.get_daemon(type_, id_, cluster).restart()
|
||||
clusters.add(cluster)
|
||||
|
||||
|
||||
if config.get('wait-for-healthy', True):
|
||||
for cluster in clusters:
|
||||
healthy(ctx=ctx, config=dict(cluster=cluster))
|
||||
|
@ -548,7 +548,7 @@ class TestDataScan(CephFSTestCase):
|
||||
|
||||
pg_count = self.fs.pgs_per_fs_pool
|
||||
for pg_n in range(0, pg_count):
|
||||
pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
|
||||
pg_str = "{0}.{1:x}".format(self.fs.get_data_pool_id(), pg_n)
|
||||
out = self.fs.data_scan(["pg_files", "mydir", pg_str])
|
||||
lines = [l for l in out.split("\n") if l]
|
||||
log.info("{0}: {1}".format(pg_str, lines))
|
||||
|
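The test_data_scan fix above formats the placement-group number in hex because PG ids are rendered as "<pool id>.<pg number in hex>"; with the old "{1}" formatting, PG 10 of pool 2 would have been queried as "2.10" instead of "2.a". A couple of check lines, relying only on standard Python string formatting:

    # PG ids are "<pool id>.<pg number in hex>": decimal and hex formatting diverge
    # once the PG number reaches 10.
    pool_id, pg_n = 2, 10
    assert "{0}.{1}".format(pool_id, pg_n) == "2.10"    # old form, wrong for pg >= 10
    assert "{0}.{1:x}".format(pool_id, pg_n) == "2.a"   # new form, matches PG naming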
@ -344,6 +344,35 @@ class TestStrays(CephFSTestCase):
|
||||
|
||||
self.await_data_pool_empty()
|
||||
|
||||
def test_reintegration_limit(self):
|
||||
"""
|
||||
That the reintegration is not blocked by full directories.
|
||||
"""
|
||||
|
||||
LOW_LIMIT = 50
|
||||
self.config_set('mds', 'mds_bal_fragment_size_max', str(LOW_LIMIT))
|
||||
time.sleep(10) # for config to reach MDS; async create is fast!!
|
||||
|
||||
last_reintegrated = self.get_mdc_stat("strays_reintegrated")
|
||||
self.mount_a.run_shell_payload("""
|
||||
mkdir a b
|
||||
for i in `seq 1 50`; do
|
||||
touch a/"$i"
|
||||
ln a/"$i" b/"$i"
|
||||
done
|
||||
sync -f a b
|
||||
rm a/*
|
||||
""")
|
||||
|
||||
self.wait_until_equal(
|
||||
lambda: self.get_mdc_stat("num_strays"),
|
||||
expect_val=0,
|
||||
timeout=60
|
||||
)
|
||||
curr_reintegrated = self.get_mdc_stat("strays_reintegrated")
|
||||
self.assertGreater(curr_reintegrated, last_reintegrated)
|
||||
|
||||
|
||||
def test_hardlink_reintegration(self):
|
||||
"""
|
||||
That removal of primary dentry of hardlinked inode results
|
||||
|
@ -2120,17 +2120,20 @@ class TestVolumes(CephFSTestCase):
|
||||
expected_mode2 = "777"
|
||||
|
||||
# create group
|
||||
self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
|
||||
self._fs_cmd("subvolumegroup", "create", self.volname, group1)
|
||||
self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777")
|
||||
|
||||
group1_path = self._get_subvolume_group_path(self.volname, group1)
|
||||
group2_path = self._get_subvolume_group_path(self.volname, group2)
|
||||
volumes_path = os.path.dirname(group1_path)
|
||||
|
||||
# check group's mode
|
||||
actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
|
||||
actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
|
||||
actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
|
||||
self.assertEqual(actual_mode1, expected_mode1)
|
||||
self.assertEqual(actual_mode2, expected_mode2)
|
||||
self.assertEqual(actual_mode3, expected_mode1)
|
||||
|
||||
self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
|
||||
self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
|
||||
@ -2160,6 +2163,36 @@ class TestVolumes(CephFSTestCase):
|
||||
# remove group
|
||||
self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
|
||||
|
||||
def test_subvolume_create_with_desired_mode(self):
|
||||
subvol1 = self._generate_random_subvolume_name()
|
||||
|
||||
# default mode
|
||||
default_mode = "755"
|
||||
# desired mode
|
||||
desired_mode = "777"
|
||||
|
||||
self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")
|
||||
|
||||
subvol1_path = self._get_subvolume_path(self.volname, subvol1)
|
||||
|
||||
# check subvolumegroup's mode
|
||||
subvol_par_path = os.path.dirname(subvol1_path)
|
||||
group_path = os.path.dirname(subvol_par_path)
|
||||
actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
|
||||
self.assertEqual(actual_mode1, default_mode)
|
||||
# check /volumes mode
|
||||
volumes_path = os.path.dirname(group_path)
|
||||
actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
|
||||
self.assertEqual(actual_mode2, default_mode)
|
||||
# check subvolume's mode
|
||||
actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
|
||||
self.assertEqual(actual_mode3, desired_mode)
|
||||
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvol1)
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
def test_subvolume_create_with_desired_mode_in_group(self):
|
||||
subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
|
||||
|
||||
@ -3694,6 +3727,40 @@ class TestVolumes(CephFSTestCase):
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
def test_subvolume_snapshot_clone_quota_exceeded(self):
|
||||
subvolume = self._generate_random_subvolume_name()
|
||||
snapshot = self._generate_random_snapshot_name()
|
||||
clone = self._generate_random_clone_name()
|
||||
|
||||
# create subvolume with 20MB quota
|
||||
osize = self.DEFAULT_FILE_SIZE*1024*1024*20
|
||||
self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))
|
||||
|
||||
# do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
|
||||
self._do_subvolume_io(subvolume, number_of_files=50)
|
||||
|
||||
# snapshot subvolume
|
||||
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
# check clone status
|
||||
self._wait_for_clone_to_complete(clone)
|
||||
|
||||
# verify clone
|
||||
self._verify_clone(subvolume, snapshot, clone)
|
||||
|
||||
# remove snapshot
|
||||
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
|
||||
|
||||
# remove subvolumes
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
|
||||
self._fs_cmd("subvolume", "rm", self.volname, clone)
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
def test_subvolume_snapshot_clone(self):
|
||||
subvolume = self._generate_random_subvolume_name()
|
||||
snapshot = self._generate_random_snapshot_name()
|
||||
@ -3788,6 +3855,25 @@ class TestVolumes(CephFSTestCase):
|
||||
max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
|
||||
self.assertEqual(max_concurrent_clones, 2)
|
||||
|
||||
def test_subvolume_snapshot_config_snapshot_clone_delay(self):
|
||||
"""
|
||||
Validate 'snapshot_clone_delay' config option
|
||||
"""
|
||||
|
||||
# get the default delay before starting the clone
|
||||
default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
|
||||
self.assertEqual(default_timeout, 0)
|
||||
|
||||
# Insert delay of 2 seconds at the beginning of the snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
|
||||
self.assertEqual(default_timeout, 2)
|
||||
|
||||
# Decrease number of cloner threads
|
||||
self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
|
||||
max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
|
||||
self.assertEqual(max_concurrent_clones, 2)
|
||||
|
||||
def test_subvolume_snapshot_clone_pool_layout(self):
|
||||
subvolume = self._generate_random_subvolume_name()
|
||||
snapshot = self._generate_random_snapshot_name()
|
||||
@ -4120,6 +4206,9 @@ class TestVolumes(CephFSTestCase):
|
||||
# ensure metadata file is in legacy location, with required version v1
|
||||
self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
|
||||
|
||||
# Insert delay at the beginning of snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
@ -4164,6 +4253,9 @@ class TestVolumes(CephFSTestCase):
|
||||
# snapshot subvolume
|
||||
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
|
||||
|
||||
# Insert delay at the beginning of snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
@ -4210,6 +4302,9 @@ class TestVolumes(CephFSTestCase):
|
||||
# snapshot subvolume
|
||||
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
|
||||
|
||||
# Insert delay at the beginning of snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
@ -4255,6 +4350,9 @@ class TestVolumes(CephFSTestCase):
|
||||
# snapshot subvolume
|
||||
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
|
||||
|
||||
# Insert delay at the beginning of snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
@ -4468,6 +4566,9 @@ class TestVolumes(CephFSTestCase):
|
||||
# snapshot subvolume
|
||||
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
|
||||
|
||||
# Insert delay at the beginning of snapshot clone
|
||||
self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
|
||||
|
||||
# schedule a clone
|
||||
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
|
||||
|
||||
@ -4553,3 +4654,142 @@ class TestVolumes(CephFSTestCase):
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
|
||||
"""
|
||||
Validate handcrafted .meta file on legacy subvol root doesn't break the system
|
||||
on legacy subvol upgrade to v1
|
||||
poor man's upgrade test -- theme continues...
|
||||
"""
|
||||
subvol1, subvol2 = self._generate_random_subvolume_name(2)
|
||||
|
||||
# emulate a old-fashioned subvolume in the default group
|
||||
createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
|
||||
self.mount_a.run_shell(['mkdir', '-p', createpath1])
|
||||
|
||||
# add required xattrs to subvolume
|
||||
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
|
||||
self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool)
|
||||
|
||||
# create v2 subvolume
|
||||
self._fs_cmd("subvolume", "create", self.volname, subvol2)
|
||||
|
||||
# Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
|
||||
# .meta into legacy subvol1's root
|
||||
subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
|
||||
self.mount_a.run_shell(["cp", subvol2_metapath, createpath1])
|
||||
|
||||
# Upgrade legacy subvol1 to v1
|
||||
subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
|
||||
self.assertNotEqual(subvolpath1, None)
|
||||
subvolpath1 = subvolpath1.rstrip()
|
||||
|
||||
# the subvolume path returned should not be of subvol2 from handcrafted
|
||||
# .meta file
|
||||
self.assertEqual(createpath1[1:], subvolpath1)
|
||||
|
||||
# ensure metadata file is in legacy location, with required version v1
|
||||
self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)
|
||||
|
||||
# Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
|
||||
# path whose '.meta' file is copied to subvol1 root
|
||||
authid1 = "alice"
|
||||
self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
|
||||
|
||||
# Validate that the mds path added is of subvol1 and not of subvol2
|
||||
out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
|
||||
self.assertEqual("client.alice", out[0]["entity"])
|
||||
self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
|
||||
|
||||
# remove subvolume
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvol1)
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvol2)
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
def test_binary_metafile_on_legacy_to_v1_upgrade(self):
|
||||
"""
|
||||
Validate binary .meta file on legacy subvol root doesn't break the system
|
||||
on legacy subvol upgrade to v1
|
||||
poor man's upgrade test -- theme continues...
|
||||
"""
|
||||
subvol = self._generate_random_subvolume_name()
|
||||
group = self._generate_random_group_name()
|
||||
|
||||
# emulate a old-fashioned subvolume -- in a custom group
|
||||
createpath = os.path.join(".", "volumes", group, subvol)
|
||||
self.mount_a.run_shell(['mkdir', '-p', createpath])
|
||||
|
||||
# add required xattrs to subvolume
|
||||
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
|
||||
self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
|
||||
|
||||
# Create unparseable binary .meta file on legacy subvol's root
|
||||
meta_contents = os.urandom(4096)
|
||||
meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
|
||||
sudo_write_file(self.mount_a.client_remote, meta_filepath, meta_contents)
|
||||
|
||||
# Upgrade legacy subvol to v1
|
||||
subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
|
||||
self.assertNotEqual(subvolpath, None)
|
||||
subvolpath = subvolpath.rstrip()
|
||||
|
||||
# The legacy subvolume path should be returned for subvol.
|
||||
# Should ignore unparseable binary .meta file in subvol's root
|
||||
self.assertEqual(createpath[1:], subvolpath)
|
||||
|
||||
# ensure metadata file is in legacy location, with required version v1
|
||||
self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
|
||||
|
||||
# remove subvolume
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
# remove group
|
||||
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
|
||||
|
||||
def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
|
||||
"""
|
||||
Validate unparseable text .meta file on legacy subvol root doesn't break the system
|
||||
on legacy subvol upgrade to v1
|
||||
poor man's upgrade test -- theme continues...
|
||||
"""
|
||||
subvol = self._generate_random_subvolume_name()
|
||||
group = self._generate_random_group_name()
|
||||
|
||||
# emulate a old-fashioned subvolume -- in a custom group
|
||||
createpath = os.path.join(".", "volumes", group, subvol)
|
||||
self.mount_a.run_shell(['mkdir', '-p', createpath])
|
||||
|
||||
# add required xattrs to subvolume
|
||||
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
|
||||
self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
|
||||
|
||||
# Create unparseable text .meta file on legacy subvol's root
|
||||
meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
|
||||
meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
|
||||
sudo_write_file(self.mount_a.client_remote, meta_filepath, meta_contents)
|
||||
|
||||
# Upgrade legacy subvol to v1
|
||||
subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
|
||||
self.assertNotEqual(subvolpath, None)
|
||||
subvolpath = subvolpath.rstrip()
|
||||
|
||||
# The legacy subvolume path should be returned for subvol.
|
||||
# Should ignore unparseable binary .meta file in subvol's root
|
||||
self.assertEqual(createpath[1:], subvolpath)
|
||||
|
||||
# ensure metadata file is in legacy location, with required version v1
|
||||
self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
|
||||
|
||||
# remove subvolume
|
||||
self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
|
||||
|
||||
# verify trash dir is clean
|
||||
self._wait_for_trash_empty()
|
||||
|
||||
# remove group
|
||||
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
|
||||
|
@ -50,11 +50,12 @@ class DaemonWatchdog(Greenlet):
|
||||
|
||||
def bark(self):
|
||||
self.log("BARK! unmounting mounts and killing all daemons")
|
||||
for mount in self.ctx.mounts.values():
|
||||
try:
|
||||
mount.umount_wait(force=True)
|
||||
except:
|
||||
self.logger.exception("ignoring exception:")
|
||||
if hasattr(self.ctx, 'mounts'):
|
||||
for mount in self.ctx.mounts.values():
|
||||
try:
|
||||
mount.umount_wait(force=True)
|
||||
except:
|
||||
self.logger.exception("ignoring exception:")
|
||||
daemons = []
|
||||
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('osd', cluster=self.cluster)))
|
||||
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.cluster)))
|
||||
|
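The watchdog change above guards ctx.mounts with hasattr() because jobs that never mount a client may not define that attribute, and bark() would otherwise die with AttributeError before it got to killing daemons. The general shape of the guard, as a small sketch with a stand-in context object (the names here are illustrative, not teuthology's):

    # Stand-in context: 'mounts' may or may not be present, which is exactly the
    # situation bark() now tolerates.
    class Ctx:
        pass

    def unmount_all(ctx):
        if hasattr(ctx, "mounts"):
            for mount in ctx.mounts.values():
                try:
                    mount.umount_wait(force=True)
                except Exception:
                    pass  # keep tearing things down even if one unmount fails

    ctx = Ctx()        # no mounts attribute: previously an AttributeError
    unmount_all(ctx)   # now simply a no-op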
@ -14,6 +14,7 @@ from teuthology import contextutil
|
||||
from teuthology import misc as teuthology
|
||||
from teuthology.config import config as teuth_config
|
||||
from teuthology.orchestra import run
|
||||
from teuthology.packaging import install_package, remove_package
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -94,6 +95,25 @@ def create_dirs(ctx, config):
|
||||
]
|
||||
)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def install_block_rbd_driver(ctx, config):
|
||||
"""
|
||||
Make sure qemu rbd block driver (block-rbd.so) is installed
|
||||
"""
|
||||
for client, client_config in config.items():
|
||||
(remote,) = ctx.cluster.only(client).remotes.keys()
|
||||
if remote.os.package_type == 'rpm':
|
||||
block_rbd_pkg = 'qemu-kvm-block-rbd'
|
||||
else:
|
||||
block_rbd_pkg = 'qemu-block-extra'
|
||||
install_package(block_rbd_pkg, remote)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
for client, client_config in config.items():
|
||||
(remote,) = ctx.cluster.only(client).remotes.keys()
|
||||
remove_package(block_rbd_pkg, remote)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def generate_iso(ctx, config):
|
||||
"""Execute system commands to generate iso"""
|
||||
@ -572,6 +592,7 @@ def task(ctx, config):
|
||||
create_images(ctx=ctx, config=config, managers=managers)
|
||||
managers.extend([
|
||||
lambda: create_dirs(ctx=ctx, config=config),
|
||||
lambda: install_block_rbd_driver(ctx=ctx, config=config),
|
||||
lambda: generate_iso(ctx=ctx, config=config),
|
||||
lambda: download_image(ctx=ctx, config=config),
|
||||
])
|
||||
|
@ -51,7 +51,7 @@ def task(ctx, config):
|
||||
# set versions for cloning the repo
|
||||
apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format(
|
||||
maven_version=maven_version)
|
||||
maven_link = 'http://www-us.apache.org/dist/maven/' + \
|
||||
maven_link = 'http://archive.apache.org/dist/maven/' + \
|
||||
'{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven
|
||||
hadoop_git = 'https://github.com/apache/hadoop'
|
||||
hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver)
|
||||
|
@ -2171,13 +2171,14 @@ function test_mon_pg()
|
||||
function test_mon_osd_pool_set()
|
||||
{
|
||||
TEST_POOL_GETSET=pool_getset
|
||||
ceph osd pool create $TEST_POOL_GETSET 1
|
||||
expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
|
||||
expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
|
||||
ceph osd pool application enable $TEST_POOL_GETSET rados
|
||||
ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
|
||||
wait_for_clean
|
||||
ceph osd pool get $TEST_POOL_GETSET all
|
||||
|
||||
for s in pg_num pgp_num size min_size crush_rule; do
|
||||
for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
|
||||
ceph osd pool get $TEST_POOL_GETSET $s
|
||||
done
|
||||
|
||||
@ -2250,6 +2251,12 @@ function test_mon_osd_pool_set()
|
||||
ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
|
||||
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
|
||||
|
||||
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
|
||||
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
|
||||
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
|
||||
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
|
||||
ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
|
||||
|
||||
ceph osd pool set $TEST_POOL_GETSET nopgchange 1
|
||||
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
|
||||
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
|
||||
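The new checks above pin down target_size_ratio handling: pool creation and "osd pool set" reject negative or non-numeric ratios and accept values such as 0.1 or 1, and the ratio is readable back via "osd pool get" (it feeds the pg_autoscaler's expected-capacity sizing). Roughly the same exercise driven from Python, assuming a reachable cluster, an admin keyring, the ceph CLI on PATH, and an existing pool named pool_getset:

    # Hedged sketch only, under the assumptions stated above.
    import subprocess

    def pool_set(pool, key, value):
        return subprocess.run(["ceph", "osd", "pool", "set", pool, key, str(value)],
                              capture_output=True, text=True)

    assert pool_set("pool_getset", "target_size_ratio", -3).returncode != 0      # rejected
    assert pool_set("pool_getset", "target_size_ratio", "abc").returncode != 0   # rejected
    assert pool_set("pool_getset", "target_size_ratio", 1).returncode == 0       # accepted
    print(subprocess.run(["ceph", "osd", "pool", "get", "pool_getset", "target_size_ratio"],
                         capture_output=True, text=True).stdout.strip())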
|
Some files were not shown because too many files have changed in this diff.