diff --git a/ceph/.github/CODEOWNERS b/ceph/.github/CODEOWNERS index 23749a1d3..565275481 100644 --- a/ceph/.github/CODEOWNERS +++ b/ceph/.github/CODEOWNERS @@ -49,3 +49,88 @@ COPYING* @ceph/doc-writers /doc/ @ceph/doc-writers README* @ceph/doc-writers *.rst @ceph/doc-writers + +# core +/doc/man/8/ceph-authtool.rst @ceph/core +/doc/man/8/ceph-conf.rst @ceph/core +/doc/man/8/ceph-create-keys.rst @ceph/core +/doc/man/8/ceph-kvstore-tool.rst @ceph/core +/doc/man/8/ceph-mon.rst @ceph/core +/doc/man/8/ceph-objectstore-tool.rst @ceph/core +/doc/man/8/ceph-osd.rst @ceph/core +/doc/man/8/ceph.rst @ceph/core +/doc/man/8/crushtool.rst @ceph/core +/doc/man/8/monmaptool.rst @ceph/core +/doc/man/8/rados.rst @ceph/core +/doc/rados @ceph/core +/qa/standalone @ceph/core +/qa/suites/rados @ceph/core +/qa/workunits/erasure-code @ceph/core +/qa/workunits/mgr @ceph/core +/qa/workunits/mon @ceph/core +/qa/workunits/objectstore @ceph/core +/qa/workunits/rados @ceph/core +/src/ceph.in @ceph/core +/src/ceph_osd.cc @ceph/core +/src/ceph_mon.cc @ceph/core +/src/blk @ceph/core +/src/crush @ceph/core +/src/erasure-code @ceph/core +/src/kv @ceph/core +/src/librados @ceph/core +/src/mgr @ceph/core +/src/mon @ceph/core +/src/msg @ceph/core +/src/os @ceph/core +/src/osd @ceph/core +/src/tools/rados @ceph/core +/src/test/osd @ceph/core + +# rbd +/doc/dev/rbd* @ceph/rbd +/doc/man/8/ceph-rbdnamer.rst @ceph/rbd +/doc/man/8/rbd* @ceph/rbd +/doc/rbd @ceph/rbd +/doc/start/quick-rbd.rst @ceph/rbd +/qa/rbd @ceph/rbd +/qa/run_xfstests* @ceph/rbd +/qa/suites/krbd @ceph/rbd +/qa/suites/rbd @ceph/rbd +/qa/tasks/ceph_iscsi_client.py @ceph/rbd +/qa/tasks/metadata.yaml @ceph/rbd +/qa/tasks/qemu.py @ceph/rbd +/qa/tasks/rbd* @ceph/rbd +/qa/tasks/userdata* @ceph/rbd +/qa/workunits/cls/test_cls_journal.sh @ceph/rbd +/qa/workunits/cls/test_cls_lock.sh @ceph/rbd +/qa/workunits/cls/test_cls_rbd.sh @ceph/rbd +/qa/workunits/rbd @ceph/rbd +/src/ceph-rbdnamer @ceph/rbd +/src/cls/journal @ceph/rbd +/src/cls/lock @ceph/rbd 
+/src/cls/rbd @ceph/rbd +/src/common/options/rbd* @ceph/rbd +/src/etc-rbdmap @ceph/rbd +/src/include/krbd.h @ceph/rbd +/src/include/rbd* @ceph/rbd +/src/journal @ceph/rbd +/src/krbd.cc @ceph/rbd +/src/librbd @ceph/rbd +/src/ocf @ceph/rbd +/src/pybind/mgr/rbd_support @ceph/rbd +/src/pybind/rbd @ceph/rbd +/src/rbd* @ceph/rbd +/src/test/cli/rbd @ceph/rbd +/src/test/cli-integration/rbd @ceph/rbd +/src/test/cls_journal @ceph/rbd +/src/test/cls_lock @ceph/rbd +/src/test/cls_rbd @ceph/rbd +/src/test/journal @ceph/rbd +/src/test/librbd @ceph/rbd +/src/test/pybind/test_rbd.py @ceph/rbd +/src/test/rbd* @ceph/rbd +/src/test/run-rbd* @ceph/rbd +/src/test/test_rbd* @ceph/rbd +/src/tools/rbd* @ceph/rbd +/systemd/rbdmap.service.in @ceph/rbd +/udev/50-rbd.rules @ceph/rbd diff --git a/ceph/.github/workflows/pr-triage.yml b/ceph/.github/workflows/pr-triage.yml index 77fcff462..481f75331 100644 --- a/ceph/.github/workflows/pr-triage.yml +++ b/ceph/.github/workflows/pr-triage.yml @@ -12,13 +12,13 @@ jobs: with: sync-labels: '' repo-token: "${{ secrets.GITHUB_TOKEN }}" + - name: Assign to Dashboard project + uses: srggrs/assign-one-project-github-action@65a8ddab497df42ef268001e67bbf976f8fd39e1 + if: contains(github.event.pull_request.labels.*.name, 'dashboard') + with: + project: https://github.com/ceph/ceph/projects/6 - name: Assign milestone based on target brach name uses: iyu/actions-milestone@dbf7e5348844c9ddc6b803a5721b85fa70fe3bb9 with: configuration-path: .github/milestone.yml repo-token: "${{ secrets.GITHUB_TOKEN }}" - - name: Assign to Dashboard project - uses: srggrs/assign-one-project-github-action@65a8ddab497df42ef268001e67bbf976f8fd39e1 - if: contains(github.event.pull_request.labels.*.name, 'dashboard') - with: - project: https://github.com/ceph/ceph/projects/6 diff --git a/ceph/.readthedocs.yml b/ceph/.readthedocs.yml index 8f83166d6..f51969084 100644 --- a/ceph/.readthedocs.yml +++ b/ceph/.readthedocs.yml @@ -5,9 +5,13 @@ version: 2 formats: [] build: - image: latest + 
os: ubuntu-22.04 + tools: + python: "3.8" + apt_packages: + - ditaa + - graphviz python: - version: 3 install: - requirements: admin/doc-requirements.txt - requirements: admin/doc-read-the-docs.txt diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 989ee24d2..78cae841b 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10.2) # remove cmake/modules/FindPython* once 3.12 is required project(ceph - VERSION 16.2.10 + VERSION 16.2.11 LANGUAGES CXX C ASM) foreach(policy @@ -36,7 +36,15 @@ if(WIN32) # the targeted Windows version. The availability of certain functions and # structures will depend on it. set(WIN32_WINNT "0x0A00" CACHE STRING "Targeted Windows version.") - add_definitions(-D_WIN32_WINNT=${WIN32_WINNT}) + # In order to avoid known winpthread issues, we're using the boost + # shared mutex implementation. + # https://github.com/msys2/MINGW-packages/issues/3319 + add_definitions( + -D_WIN32_WINNT=${WIN32_WINNT} + -DBOOST_THREAD_PROVIDES_GENERIC_SHARED_MUTEX_ON_WIN + -DBOOST_THREAD_V2_SHARED_MUTEX + ) + set(Boost_THREADAPI "win32") endif() if(MINGW) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 1ff5d73ba..ea3a21e5d 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -32,9 +32,33 @@ in certain recovery scenarios, e.g., monitor database lost and rebuilt, and the restored file system is expected to have the same ID as before. +>=16.2.11 +-------- + +* Cephfs: The 'AT_NO_ATTR_SYNC' macro is deprecated, please use the standard + 'AT_STATX_DONT_SYNC' macro. The 'AT_NO_ATTR_SYNC' macro will be removed in + the future. +* Trimming of PGLog dups is now controlled by the size instead of the version. + This fixes the PGLog inflation issue that was happening when the on-line + (in OSD) trimming got jammed after a PG split operation. 
Also, a new off-line + mechanism has been added: `ceph-objectstore-tool` got `trim-pg-log-dups` op + that targets situations where OSD is unable to boot due to those inflated dups. + If that is the case, in OSD logs the "You can be hit by THE DUPS BUG" warning + will be visible. + Relevant tracker: https://tracker.ceph.com/issues/53729 +* RBD: `rbd device unmap` command gained `--namespace` option. Support for + namespaces was added to RBD in Nautilus 14.2.0 and it has been possible to + map and unmap images in namespaces using the `image-spec` syntax since then + but the corresponding option available in most other commands was missing. + >=16.2.8 -------- +* RGW: The behavior for Multipart Upload was modified so that only + CompleteMultipartUpload notification is sent at the end of the multipart upload. + The POST notification at the beginning of the upload, and PUT notifications that + were sent on each part are not sent anymore. + * MON/MGR: Pools can now be created with `--bulk` flag. Any pools created with `bulk` will use a profile of the `pg_autoscaler` that provides more performance from the start. However, any pools created without the `--bulk` flag will remain using it's old behavior diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 1bcd68750..0fcdfcf99 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -29,7 +29,11 @@ %else %bcond_without tcmalloc %endif +%if 0%{?rhel} >= 9 +%bcond_without system_pmdk +%else %bcond_with system_pmdk +%endif %if 0%{?fedora} || 0%{?rhel} %bcond_without selinux %ifarch x86_64 ppc64le @@ -120,11 +124,18 @@ # disable dwz which compresses the debuginfo %global _find_debuginfo_dwz_opts %{nil} +%if 0%{with seastar} +# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{9,10}-annobin +# do not provide gcc-annobin.so anymore, despite that they provide annobin.so. but +# redhat-rpm-config still passes -fplugin=gcc-annobin to the compiler. 
+%undefine _annotated_build +%endif + ################################################################################# # main package definition ################################################################################# Name: ceph -Version: 16.2.10 +Version: 16.2.11 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -140,7 +151,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-16.2.10.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-16.2.11.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -229,7 +240,6 @@ BuildRequires: %{luarocks_package_name} BuildRequires: jq BuildRequires: libuuid-devel BuildRequires: python%{python3_pkgversion}-bcrypt -BuildRequires: python%{python3_pkgversion}-nose BuildRequires: python%{python3_pkgversion}-pecan BuildRequires: python%{python3_pkgversion}-requests BuildRequires: python%{python3_pkgversion}-dateutil @@ -304,6 +314,7 @@ BuildRequires: rdma-core-devel BuildRequires: liblz4-devel >= 1.7 # for prometheus-alerts BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet %endif %if 0%{?fedora} || 0%{?rhel} Requires: systemd @@ -345,6 +356,7 @@ BuildRequires: python%{python3_pkgversion}-pyOpenSSL %endif %if 0%{?suse_version} BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -548,6 +560,7 @@ Group: System/Filesystems Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-jwt @@ -597,6 
+610,7 @@ Requires: python%{python3_pkgversion}-pecan Requires: python%{python3_pkgversion}-pyOpenSSL Requires: python%{python3_pkgversion}-requests Requires: python%{python3_pkgversion}-dateutil +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} >= 8 Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-pyyaml @@ -1194,12 +1208,14 @@ This package provides Ceph default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-16.2.10 +%autosetup -p1 -n ceph-16.2.11 %build -# LTO can be enabled as soon as the following GCC bug is fixed: -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200 +# Disable lto on systems that do not support symver attribute +# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200 for details +%if ( 0%{?rhel} && 0%{?rhel} < 9 ) || ( 0%{?suse_version} && 0%{?suse_version} <= 1500 ) %define _lto_cflags %{nil} +%endif %if 0%{with seastar} && 0%{?rhel} . /opt/rh/gcc-toolset-9/enable @@ -1433,6 +1449,9 @@ install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/p %clean rm -rf %{buildroot} +# built binaries are no longer necessary at this point, +# but are consuming ~17GB of disk in the build environment +rm -rf build ################################################################################# # files and systemd scriptlets @@ -1528,8 +1547,7 @@ exit 0 %if ! 
0%{?suse_version} %postun -n cephadm -userdel -r cephadm || true -exit 0 +[ $1 -ne 0 ] || userdel cephadm || : %endif %files -n cephadm @@ -1566,6 +1584,8 @@ exit 0 %{_bindir}/rbd-replay-prep %endif %{_bindir}/ceph-post-file +%dir %{_libdir}/ceph/denc +%{_libdir}/ceph/denc/denc-mod-*.so %{_tmpfilesdir}/ceph-common.conf %{_mandir}/man8/ceph-authtool.8* %{_mandir}/man8/ceph-conf.8* diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index cfd993cfa..82fc6b358 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -29,7 +29,11 @@ %else %bcond_without tcmalloc %endif +%if 0%{?rhel} >= 9 +%bcond_without system_pmdk +%else %bcond_with system_pmdk +%endif %if 0%{?fedora} || 0%{?rhel} %bcond_without selinux %ifarch x86_64 ppc64le @@ -120,6 +124,13 @@ # disable dwz which compresses the debuginfo %global _find_debuginfo_dwz_opts %{nil} +%if 0%{with seastar} +# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{9,10}-annobin +# do not provide gcc-annobin.so anymore, despite that they provide annobin.so. but +# redhat-rpm-config still passes -fplugin=gcc-annobin to the compiler. 
+%undefine _annotated_build +%endif + ################################################################################# # main package definition ################################################################################# @@ -229,7 +240,6 @@ BuildRequires: %{luarocks_package_name} BuildRequires: jq BuildRequires: libuuid-devel BuildRequires: python%{python3_pkgversion}-bcrypt -BuildRequires: python%{python3_pkgversion}-nose BuildRequires: python%{python3_pkgversion}-pecan BuildRequires: python%{python3_pkgversion}-requests BuildRequires: python%{python3_pkgversion}-dateutil @@ -304,6 +314,7 @@ BuildRequires: rdma-core-devel BuildRequires: liblz4-devel >= 1.7 # for prometheus-alerts BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet %endif %if 0%{?fedora} || 0%{?rhel} Requires: systemd @@ -345,6 +356,7 @@ BuildRequires: python%{python3_pkgversion}-pyOpenSSL %endif %if 0%{?suse_version} BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -548,6 +560,7 @@ Group: System/Filesystems Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-jwt @@ -597,6 +610,7 @@ Requires: python%{python3_pkgversion}-pecan Requires: python%{python3_pkgversion}-pyOpenSSL Requires: python%{python3_pkgversion}-requests Requires: python%{python3_pkgversion}-dateutil +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} >= 8 Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-pyyaml @@ -1197,9 +1211,11 @@ This package provides Ceph default alerts for Prometheus. 
%autosetup -p1 -n @TARBALL_BASENAME@ %build -# LTO can be enabled as soon as the following GCC bug is fixed: -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200 +# Disable lto on systems that do not support symver attribute +# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200 for details +%if ( 0%{?rhel} && 0%{?rhel} < 9 ) || ( 0%{?suse_version} && 0%{?suse_version} <= 1500 ) %define _lto_cflags %{nil} +%endif %if 0%{with seastar} && 0%{?rhel} . /opt/rh/gcc-toolset-9/enable @@ -1433,6 +1449,9 @@ install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/p %clean rm -rf %{buildroot} +# built binaries are no longer necessary at this point, +# but are consuming ~17GB of disk in the build environment +rm -rf build ################################################################################# # files and systemd scriptlets @@ -1528,8 +1547,7 @@ exit 0 %if ! 0%{?suse_version} %postun -n cephadm -userdel -r cephadm || true -exit 0 +[ $1 -ne 0 ] || userdel cephadm || : %endif %files -n cephadm @@ -1566,6 +1584,8 @@ exit 0 %{_bindir}/rbd-replay-prep %endif %{_bindir}/ceph-post-file +%dir %{_libdir}/ceph/denc +%{_libdir}/ceph/denc/denc-mod-*.so %{_tmpfilesdir}/ceph-common.conf %{_mandir}/man8/ceph-authtool.8* %{_mandir}/man8/ceph-conf.8* diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index a94dd1ca0..3a47e940a 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,13 @@ -ceph (16.2.10-1focal) focal; urgency=medium +ceph (16.2.11-1focal) focal; urgency=medium - -- Jenkins Build Slave User Thu, 21 Jul 2022 17:38:01 +0000 + -- Jenkins Build Slave User Tue, 24 Jan 2023 21:28:06 +0000 + +ceph (16.2.11-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Tue, 24 Jan 2023 20:43:11 +0000 ceph (16.2.10-1) stable; urgency=medium diff --git a/ceph/cmake/modules/BuildBoost.cmake b/ceph/cmake/modules/BuildBoost.cmake index 468ae419c..f84428921 100644 --- a/ceph/cmake/modules/BuildBoost.cmake +++ 
b/ceph/cmake/modules/BuildBoost.cmake @@ -11,6 +11,9 @@ # Boost_USE_MULTITHREADED : boolean (default: OFF) # BOOST_J: integer (defanult 1) +# CMAKE_CURRENT_FUNCTION_LIST_DIR is introduced by cmake 3.17, but ubuntu comes with 3.16 +set(_build_boost_list_dir "${CMAKE_CURRENT_LIST_DIR}") + function(check_boost_version source_dir expected_version) set(version_hpp "${source_dir}/boost/version.hpp") if(NOT EXISTS ${version_hpp}) @@ -70,7 +73,7 @@ function(do_build_boost version) if(c MATCHES "^python([0-9])\$") set(with_python_version "${CMAKE_MATCH_1}") list(APPEND boost_with_libs "python") - elseif(c MATCHES "^python([0-9])\\.?([0-9])\$") + elseif(c MATCHES "^python([0-9])\\.?([0-9]+)\$") set(with_python_version "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}") list(APPEND boost_with_libs "python") else() @@ -167,10 +170,12 @@ function(do_build_boost version) URL_HASH SHA256=${boost_sha256} DOWNLOAD_NO_PROGRESS 1) endif() + find_program(PATCH_EXECUTABLE patch) # build all components in a single shot include(ExternalProject) ExternalProject_Add(Boost ${source_dir} + PATCH_COMMAND ${PATCH_EXECUTABLE} -p3 -i ${_build_boost_list_dir}/boost-python-use-public-api-for-filename.patch CONFIGURE_COMMAND CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} ${configure_command} BUILD_COMMAND CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} ${build_command} BUILD_IN_SOURCE 1 diff --git a/ceph/cmake/modules/BuildFIO.cmake b/ceph/cmake/modules/BuildFIO.cmake index d3288cc33..a64d63922 100644 --- a/ceph/cmake/modules/BuildFIO.cmake +++ b/ceph/cmake/modules/BuildFIO.cmake @@ -9,14 +9,15 @@ function(build_fio) include(FindMake) find_make("MAKE_EXECUTABLE" "make_cmd") + set(source_dir ${CMAKE_BINARY_DIR}/src/fio) + file(MAKE_DIRECTORY ${source_dir}) ExternalProject_Add(fio_ext - DOWNLOAD_DIR ${CMAKE_BINARY_DIR}/src/ UPDATE_COMMAND "" # this disables rebuild on each run - GIT_REPOSITORY "https://github.com/axboe/fio.git" + GIT_REPOSITORY "https://github.com/ceph/fio.git" GIT_CONFIG 
advice.detachedHead=false GIT_SHALLOW 1 - GIT_TAG "fio-3.15" - SOURCE_DIR ${CMAKE_BINARY_DIR}/src/fio + GIT_TAG "fio-3.27-cxx" + SOURCE_DIR ${source_dir} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND /configure BUILD_COMMAND ${make_cmd} fio EXTFLAGS=-Wno-format-truncation ${FIO_EXTLIBS} @@ -25,5 +26,6 @@ function(build_fio) add_library(fio INTERFACE IMPORTED) add_dependencies(fio fio_ext) set_target_properties(fio PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_BINARY_DIR}/src/fio) + INTERFACE_INCLUDE_DIRECTORIES ${source_dir} + INTERFACE_COMPILE_OPTIONS "-include;${source_dir}/config-host.h;$<$:-std=gnu99>$<$:-std=gnu++17>") endfunction() diff --git a/ceph/cmake/modules/Buildpmem.cmake b/ceph/cmake/modules/Buildpmem.cmake index ead5c80ae..61c5ba601 100644 --- a/ceph/cmake/modules/Buildpmem.cmake +++ b/ceph/cmake/modules/Buildpmem.cmake @@ -21,6 +21,7 @@ function(build_pmem) set(PMDK_LIB_DIR "nondebug") endif() + set(pmdk_cflags "-Wno-error -fno-lto") include(ExternalProject) ExternalProject_Add(pmdk_ext ${source_dir_args} @@ -29,7 +30,7 @@ function(build_pmem) # build system tests statically linking to librbd (which uses # libpmemobj) will not link (because we don't build the ndctl # static library here). 
- BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} NDCTL_ENABLE=n BUILD_EXAMPLES=n BUILD_BENCHMARKS=n DOC=n + BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} "EXTRA_CFLAGS=${pmdk_cflags}" NDCTL_ENABLE=n BUILD_EXAMPLES=n BUILD_BENCHMARKS=n DOC=n BUILD_IN_SOURCE 1 BUILD_BYPRODUCTS "/src/${PMDK_LIB_DIR}/libpmem.a" "/src/${PMDK_LIB_DIR}/libpmemobj.a" INSTALL_COMMAND "") diff --git a/ceph/cmake/modules/CephChecks.cmake b/ceph/cmake/modules/CephChecks.cmake index 6a9483f1a..701ecb47c 100644 --- a/ceph/cmake/modules/CephChecks.cmake +++ b/ceph/cmake/modules/CephChecks.cmake @@ -144,6 +144,34 @@ else(NOT CMAKE_CROSSCOMPILING) message(STATUS "Assuming unaligned access is supported") endif(NOT CMAKE_CROSSCOMPILING) +set(version_script_source "v1 { }; v2 { } v1;") +file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/version_script.txt "${version_script_source}") +cmake_push_check_state(RESET) +set(CMAKE_REQUIRED_FLAGS "-Werror -Wl,--version-script=${CMAKE_CURRENT_BINARY_DIR}/version_script.txt") +check_c_source_compiles(" +__attribute__((__symver__ (\"func@v1\"))) void func_v1() {}; +__attribute__((__symver__ (\"func@v2\"))) void func_v2() {}; + +int main() {}" + HAVE_ATTR_SYMVER) + if(NOT HAVE_ATTR_SYMVER) + if(CMAKE_CXX_FLAGS MATCHES "-flto" AND NOT CMAKE_CXX_FLAGS MATCHES "-flto-partition=none") + # https://tracker.ceph.com/issues/40060 + message(FATAL_ERROR "please pass -flto-partition=none as part of CXXFLAGS") + endif() + endif() +set(CMAKE_REQUIRED_FLAGS -Wl,--version-script=${CMAKE_CURRENT_BINARY_DIR}/version_script.txt) +check_c_source_compiles(" +void func_v1() {} +__asm__(\".symver func_v1, func@v1\"); +void func_v2() {} +__asm__(\".symver func_v2, func@v2\"); + +int main() {}" + HAVE_ASM_SYMVER) +file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/version_script.txt) +cmake_pop_check_state() + # should use LINK_OPTIONS instead of LINK_LIBRARIES, if we can use cmake v3.14+ try_compile(HAVE_LINK_VERSION_SCRIPT ${CMAKE_CURRENT_BINARY_DIR} diff --git a/ceph/cmake/modules/Distutils.cmake 
b/ceph/cmake/modules/Distutils.cmake index 01a6fc500..fe3ca410e 100644 --- a/ceph/cmake/modules/Distutils.cmake +++ b/ceph/cmake/modules/Distutils.cmake @@ -65,14 +65,13 @@ function(distutils_add_cython_module target name src) # This little bit of magic wipes out __Pyx_check_single_interpreter() # Note: this is reproduced in distutils_install_cython_module list(APPEND cflags -D'void0=dead_function\(void\)') - list(APPEND cflags -D'__Pyx_check_single_interpreter\(ARG\)=ARG \#\# 0') + list(APPEND cflags -D'__Pyx_check_single_interpreter\(ARG\)=ARG\#\#0') set(PY_CC ${compiler_launcher} ${CMAKE_C_COMPILER} ${c_compiler_arg1} ${cflags}) set(PY_CXX ${compiler_launcher} ${CMAKE_CXX_COMPILER} ${cxx_compiler_arg1}) set(PY_LDSHARED ${link_launcher} ${CMAKE_C_COMPILER} ${c_compiler_arg1} "-shared") - set(suffix_var "EXT_SUFFIX") execute_process(COMMAND "${Python3_EXECUTABLE}" -c - "from distutils import sysconfig; print(sysconfig.get_config_var('${suffix_var}'))" + "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))" RESULT_VARIABLE result OUTPUT_VARIABLE ext_suffix ERROR_VARIABLE error @@ -113,7 +112,7 @@ function(distutils_install_cython_module name) set(ENV{LDSHARED} \"${PY_LDSHARED}\") set(ENV{CPPFLAGS} \"-iquote${CMAKE_SOURCE_DIR}/src/include -D'void0=dead_function\(void\)' \ - -D'__Pyx_check_single_interpreter\(ARG\)=ARG \#\# 0'\") + -D'__Pyx_check_single_interpreter\(ARG\)=ARG\#\#0'\") set(ENV{LDFLAGS} \"-L${CMAKE_LIBRARY_OUTPUT_DIRECTORY}\") set(ENV{CYTHON_BUILD_DIR} \"${CMAKE_CURRENT_BINARY_DIR}\") set(ENV{CEPH_LIBDIR} \"${CMAKE_LIBRARY_OUTPUT_DIRECTORY}\") diff --git a/ceph/cmake/modules/boost-python-use-public-api-for-filename.patch b/ceph/cmake/modules/boost-python-use-public-api-for-filename.patch new file mode 100644 index 000000000..899a02384 --- /dev/null +++ b/ceph/cmake/modules/boost-python-use-public-api-for-filename.patch @@ -0,0 +1,38 @@ +From d9f06052e28873037db7f98629bce72182a42410 Mon Sep 17 00:00:00 2001 +From: Pat Riehecky +Date: Mon, 29 
Jun 2020 10:51:58 -0500 +Subject: [PATCH] Convert Python 3.1+ to use public C API for filenames +--- + src/exec.cpp | 16 ++++++++++++---- + 1 file changed, 12 insertions(+), 4 deletions(-) +diff --git a/src/exec.cpp b/src/exec.cpp +index 171c6f4189..b2eabe59f6 100644 +--- a/src/boost/libs/python/src/exec.cpp ++++ b/src/boost/libs/python/src/exec.cpp +@@ -104,14 +104,22 @@ object BOOST_PYTHON_DECL exec_file(char const *filename, object global, object l + if (local.is_none()) local = global; + // should be 'char const *' but older python versions don't use 'const' yet. + char *f = const_cast(filename); +- // Let python open the file to avoid potential binary incompatibilities. +-#if PY_VERSION_HEX >= 0x03040000 +- FILE *fs = _Py_fopen(f, "r"); ++#if PY_VERSION_HEX >= 0x03010000 ++ // Let python manage any UTF bits to avoid potential incompatibilities. ++ PyObject *fo = Py_BuildValue("s", f); ++ PyObject *fb = Py_None; ++ PyUnicode_FSConverter(fo, &fb); ++ f = PyBytes_AsString(fb); ++ FILE *fs = fopen(f, "r"); ++ Py_DECREF(fo); ++ Py_DECREF(fb); + #elif PY_VERSION_HEX >= 0x03000000 ++ // Let python open the file to avoid potential binary incompatibilities. + PyObject *fo = Py_BuildValue("s", f); +- FILE *fs = _Py_fopen(fo, "r"); ++ FILE *fs = _Py_fopen(fo, "r"); // Private CPython API + Py_DECREF(fo); + #else ++ // Let python open the file to avoid potential binary incompatibilities. 
+ PyObject *pyfile = PyFile_FromString(f, const_cast("r")); + if (!pyfile) throw std::invalid_argument(std::string(f) + " : no such file"); + python::handle<> file(pyfile); diff --git a/ceph/debian/ceph-common.install b/ceph/debian/ceph-common.install index b84a3164d..b1c5769fd 100755 --- a/ceph/debian/ceph-common.install +++ b/ceph/debian/ceph-common.install @@ -23,6 +23,7 @@ usr/bin/rbd-replay* usr/bin/ceph-post-file usr/sbin/mount.ceph sbin usr/lib/ceph/compressor/* +usr/lib/ceph/denc/* usr/lib/ceph/crypto/* [amd64] usr/share/man/man8/ceph-authtool.8 usr/share/man/man8/ceph-conf.8 diff --git a/ceph/debian/control b/ceph/debian/control index b5ef58f07..bf89deb47 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -24,6 +24,7 @@ Build-Depends: automake, g++ (>= 7), javahelper, jq , + jsonnet , junit4, libaio-dev, libbabeltrace-ctf-dev, @@ -37,7 +38,7 @@ Build-Depends: automake, libcurl4-openssl-dev, # Jaeger libevent-dev, libexpat1-dev, -# Make-Check libffi-dev [!amd64], + libffi-dev [!amd64] , libfuse-dev, libgoogle-perftools-dev [i386 amd64 arm64], # Crimson libgnutls28-dev, @@ -68,44 +69,44 @@ Build-Depends: automake, librabbitmq-dev, librdkafka-dev, luarocks, -# Make-Check libxmlsec1, -# Make-Check libxmlsec1-nss, -# Make-Check libxmlsec1-openssl, -# Make-Check libxmlsec1-dev, + libxmlsec1 , + libxmlsec1-nss , + libxmlsec1-openssl , + libxmlsec1-dev , # Crimson libyaml-cpp-dev, # Jaeger nlohmann-json-dev | nlohmann-json3-dev, parted, patch, pkg-config, -# Make-Check prometheus, + prometheus , # Crimson protobuf-compiler, python3-all-dev, python3-cherrypy3, -# Make-Check python3-jwt, -# Make-Check python3-nose, -# Make-Check python3-pecan, -# Make-Check python3-bcrypt, -# Make-Check tox, -# Make-Check python3-coverage, -# Make-Check python3-dateutil, -# Make-Check python3-openssl, -# Make-Check python3-prettytable, -# Make-Check python3-requests, -# Make-Check python3-scipy, + python3-jwt , + python3-pecan , + python3-bcrypt , + tox , + 
python3-coverage , + python3-dateutil , + python3-pkg-resources , + python3-openssl , + python3-prettytable , + python3-requests , + python3-scipy , python3-setuptools, python3-sphinx, -# Make-Check python3-werkzeug, + python3-werkzeug , python3-setuptools, python3-venv, # Crimson ragel, -# Make-Check socat, + socat , # Crimson systemtap-sdt-dev, -# Make-Check uuid-dev, + uuid-dev , uuid-runtime, valgrind, xfslibs-dev, -# Make-Check xfsprogs, -# Make-Check xmlstarlet, + xfsprogs , + xmlstarlet , nasm [amd64], zlib1g-dev, # Jaeger Built-Using: libyaml-cpp-dev (>= 0.6), @@ -234,6 +235,8 @@ Depends: ceph-base (= ${binary:Version}), python3-pecan, python3-requests, python3-werkzeug, + libsqlite3-mod-ceph (= ${binary:Version}), + librados2 (= ${binary:Version}), ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, @@ -258,6 +261,7 @@ Depends: ceph-mgr (= ${binary:Version}), python3-cherrypy3, python3-jwt, python3-bcrypt, + python3-pkg-resources, python3-werkzeug, python3-routes, ${misc:Depends}, @@ -735,7 +739,8 @@ Description: RADOS distributed object store client C++ library (development file Package: libsqlite3-mod-ceph Architecture: any Section: libs -Depends: ${misc:Depends}, +Depends: librados2 (= ${binary:Version}), + ${misc:Depends}, ${shlibs:Depends}, Description: SQLite3 VFS for Ceph A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS @@ -1249,3 +1254,4 @@ Description: prometheus alerts for the ceph dashboard . This package contains alerts used for prometheus to interact with the Ceph Dashboard. + diff --git a/ceph/debian/python3-ceph-argparse.install b/ceph/debian/python3-ceph-argparse.install old mode 100644 new mode 100755 index 274b8b4f7..52479f935 --- a/ceph/debian/python3-ceph-argparse.install +++ b/ceph/debian/python3-ceph-argparse.install @@ -1,2 +1,4 @@ -usr/lib/python3*/dist-packages/ceph_argparse.py -usr/lib/python3*/dist-packages/ceph_daemon.py +#! 
/usr/bin/dh-exec + +usr/lib/python3*/*-packages/ceph_argparse.py /usr/lib/python3/dist-packages/ +usr/lib/python3*/*-packages/ceph_daemon.py /usr/lib/python3/dist-packages/ diff --git a/ceph/debian/python3-cephfs.install b/ceph/debian/python3-cephfs.install old mode 100644 new mode 100755 index 6eb883670..ac88db221 --- a/ceph/debian/python3-cephfs.install +++ b/ceph/debian/python3-cephfs.install @@ -1,3 +1,5 @@ -usr/lib/python3*/dist-packages/ceph_volume_client.py +#! /usr/bin/dh-exec + +usr/lib/python3*/*-packages/ceph_volume_client.py /usr/lib/python3/dist-packages/ usr/lib/python3*/dist-packages/cephfs-*.egg-info usr/lib/python3*/dist-packages/cephfs.cpython*.so diff --git a/ceph/do_cmake.sh b/ceph/do_cmake.sh index 4cb07c106..25fb827b4 100755 --- a/ceph/do_cmake.sh +++ b/ceph/do_cmake.sh @@ -36,6 +36,13 @@ if [ -r /etc/os-release ]; then ARGS+=" -DWITH_RADOSGW_AMQP_ENDPOINT=OFF" ARGS+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF" ;; + ubuntu) + MAJOR_VER=$(echo "$VERSION_ID" | sed -e 's/\..*$//') + if [ "$MAJOR_VER" -ge "22" ] ; then + PYBUILD="3.10" + fi + ;; + esac elif [ "$(uname)" == FreeBSD ] ; then PYBUILD="3" diff --git a/ceph/doc/_static/css/custom.css b/ceph/doc/_static/css/custom.css index c44ccb450..2a37cab99 100644 --- a/ceph/doc/_static/css/custom.css +++ b/ceph/doc/_static/css/custom.css @@ -1,3 +1,23 @@ +dt { + scroll-margin-top: 3em; +} + +h2 { + scroll-margin-top: 4em; +} + +h3 { + scroll-margin-top: 4em; +} + +section { + scroll-margin-top: 4em; +} + +span { + scroll-margin-top: 2em; +} + ul.simple > li > ul > li:last-child { margin-block-end : 1em; } diff --git a/ceph/doc/architecture.rst b/ceph/doc/architecture.rst index 33558c0a8..46be74603 100644 --- a/ceph/doc/architecture.rst +++ b/ceph/doc/architecture.rst @@ -13,6 +13,7 @@ replicate and redistribute data dynamically. .. image:: images/stack.png +.. _arch-ceph-storage-cluster: The Ceph Storage Cluster ======================== @@ -59,7 +60,7 @@ service interfaces built on top of ``librados``. 
Storing Data ------------ -The Ceph Storage Cluster receives data from :term:`Ceph Clients`--whether it +The Ceph Storage Cluster receives data from :term:`Ceph Client`\s--whether it comes through a :term:`Ceph Block Device`, :term:`Ceph Object Storage`, the :term:`Ceph File System` or a custom implementation you create using ``librados``-- which is stored as RADOS objects. Each object is stored on an @@ -80,7 +81,7 @@ stored in a monolithic database-like fashion. Ceph OSD Daemons store data as objects in a flat namespace (e.g., no hierarchy of directories). An object has an identifier, binary data, and metadata consisting of a set of name/value pairs. The semantics are completely -up to :term:`Ceph Clients`. For example, CephFS uses metadata to store file +up to :term:`Ceph Client`\s. For example, CephFS uses metadata to store file attributes such as the file owner, created date, last modified date, and so forth. @@ -135,6 +136,8 @@ Placement of Replicated Data`_. .. index:: architecture; cluster map +.. _architecture_cluster_map: + Cluster Map ~~~~~~~~~~~ @@ -581,7 +584,7 @@ objects. Peering and Sets ~~~~~~~~~~~~~~~~ -In previous sections, we noted that Ceph OSD Daemons check each others +In previous sections, we noted that Ceph OSD Daemons check each other's heartbeats and report back to the Ceph Monitor. Another thing Ceph OSD daemons do is called 'peering', which is the process of bringing all of the OSDs that store a Placement Group (PG) into agreement about the state of all of the @@ -1619,13 +1622,13 @@ instance for high availability. -.. _RADOS - A Scalable, Reliable Storage Service for Petabyte-scale Storage Clusters: https://ceph.com/wp-content/uploads/2016/08/weil-rados-pdsw07.pdf +.. _RADOS - A Scalable, Reliable Storage Service for Petabyte-scale Storage Clusters: https://ceph.io/assets/pdfs/weil-rados-pdsw07.pdf .. _Paxos: https://en.wikipedia.org/wiki/Paxos_(computer_science) .. _Monitor Config Reference: ../rados/configuration/mon-config-ref .. 
_Monitoring OSDs and PGs: ../rados/operations/monitoring-osd-pg .. _Heartbeats: ../rados/configuration/mon-osd-interaction .. _Monitoring OSDs: ../rados/operations/monitoring-osd-pg/#monitoring-osds -.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: https://ceph.com/wp-content/uploads/2016/08/weil-crush-sc06.pdf +.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: https://ceph.io/assets/pdfs/weil-crush-sc06.pdf .. _Data Scrubbing: ../rados/configuration/osd-config-ref#scrubbing .. _Report Peering Failure: ../rados/configuration/mon-osd-interaction#osds-report-peering-failure .. _Troubleshooting Peering Failure: ../rados/troubleshooting/troubleshooting-pg#placement-group-down-peering-failure diff --git a/ceph/doc/ceph-volume/lvm/prepare.rst b/ceph/doc/ceph-volume/lvm/prepare.rst index 21cae4ee5..ae6aac414 100644 --- a/ceph/doc/ceph-volume/lvm/prepare.rst +++ b/ceph/doc/ceph-volume/lvm/prepare.rst @@ -2,25 +2,22 @@ ``prepare`` =========== -This subcommand allows a :term:`filestore` or :term:`bluestore` setup. It is -recommended to pre-provision a logical volume before using it with -``ceph-volume lvm``. +Before you run ``ceph-volume lvm prepare``, we recommend that you provision a +logical volume. Then you can run ``prepare`` on that logical volume. -Logical volumes are not altered except for adding extra metadata. +``prepare`` adds metadata to logical volumes but does not alter them in any +other way. -.. note:: This is part of a two step process to deploy an OSD. If looking for - a single-call way, please see :ref:`ceph-volume-lvm-create` +.. note:: This is part of a two-step process to deploy an OSD. If you prefer + to deploy an OSD by using only one command, see :ref:`ceph-volume-lvm-create`. -To help identify volumes, the process of preparing a volume (or volumes) to -work with Ceph, the tool will assign a few pieces of metadata information using -:term:`LVM tags`. 
- -:term:`LVM tags` makes volumes easy to discover later, and help identify them as -part of a Ceph system, and what role they have (journal, filestore, bluestore, -etc...) - -Although :term:`bluestore` is the default, the back end can be specified with: +``prepare`` uses :term:`LVM tags` to assign several pieces of metadata to a +logical volume. Volumes tagged in this way are easier to identify and easier to +use with Ceph. :term:`LVM tags` identify logical volumes by the role that they +play in the Ceph cluster (for example: BlueStore data or BlueStore WAL+DB). +:term:`BlueStore` is the default backend. Ceph permits changing +the backend, which can be done by using the following flags and arguments: * :ref:`--filestore ` * :ref:`--bluestore ` @@ -29,50 +26,58 @@ Although :term:`bluestore` is the default, the back end can be specified with: ``bluestore`` ------------- -The :term:`bluestore` objectstore is the default for new OSDs. It offers a bit -more flexibility for devices compared to :term:`filestore`. -Bluestore supports the following configurations: +:term:`Bluestore` is the default backend for new OSDs. It +offers more flexibility for devices than :term:`filestore` does. Bluestore +supports the following configurations: -* A block device, a block.wal, and a block.db device -* A block device and a block.wal device -* A block device and a block.db device -* A single block device +* a block device, a block.wal device, and a block.db device +* a block device and a block.wal device +* a block device and a block.db device +* a single block device -The bluestore subcommand accepts physical block devices, partitions on -physical block devices or logical volumes as arguments for the various device parameters -If a physical device is provided, a logical volume will be created. A volume group will -either be created or reused it its name begins with ``ceph``. 
-This allows a simpler approach at using LVM but at the cost of flexibility: -there are no options or configurations to change how the LV is created. +The ``bluestore`` subcommand accepts physical block devices, partitions on physical +block devices, or logical volumes as arguments for the various device +parameters. If a physical block device is provided, a logical volume will be +created. If the provided volume group's name begins with ``ceph``, it will be +created if it does not yet exist and it will be clobbered and reused if it +already exists. This allows for a simpler approach to using LVM but at the +cost of flexibility: no option or configuration can be used to change how the +logical volume is created. The ``block`` is specified with the ``--data`` flag, and in its simplest use -case it looks like:: +case it looks like: + +.. prompt:: bash # ceph-volume lvm prepare --bluestore --data vg/lv -A raw device can be specified in the same way:: +A raw device can be specified in the same way: + +.. prompt:: bash # ceph-volume lvm prepare --bluestore --data /path/to/device -For enabling :ref:`encryption `, the ``--dmcrypt`` flag is required:: +For enabling :ref:`encryption `, the ``--dmcrypt`` flag is required: + +.. prompt:: bash # ceph-volume lvm prepare --bluestore --dmcrypt --data vg/lv -If a ``block.db`` or a ``block.wal`` is needed (they are optional for -bluestore) they can be specified with ``--block.db`` and ``--block.wal`` -accordingly. These can be a physical device, a partition or -a logical volume. +If a ``block.db`` device or a ``block.wal`` device is needed, it can be +specified with ``--block.db`` or ``--block.wal``. These can be physical +devices, partitions, or logical volumes. ``block.db`` and ``block.wal`` are +optional for bluestore. -For both ``block.db`` and ``block.wal`` partitions aren't made logical volumes -because they can be used as-is.
+For both ``block.db`` and ``block.wal``, partitions can be used as-is, and +therefore are not made into logical volumes. -While creating the OSD directory, the process will use a ``tmpfs`` mount to -place all the files needed for the OSD. These files are initially created by -``ceph-osd --mkfs`` and are fully ephemeral. +While creating the OSD directory, the process uses a ``tmpfs`` mount to hold +the files needed for the OSD. These files are created by ``ceph-osd --mkfs`` +and are ephemeral. -A symlink is always created for the ``block`` device, and optionally for -``block.db`` and ``block.wal``. For a cluster with a default name, and an OSD -id of 0, the directory could look like:: +A symlink is created for the ``block`` device, and is optional for ``block.db`` +and ``block.wal``. For a cluster with a default name and an OSD ID of 0, the +directory looks like this:: # ls -l /var/lib/ceph/osd/ceph-0 lrwxrwxrwx. 1 ceph ceph 93 Oct 20 13:05 block -> /dev/ceph-be2b6fbd-bcf2-4c51-b35d-a35a162a02f0/osd-block-25cf0a05-2bc6-44ef-9137-79d65bd7ad62 @@ -85,11 +90,11 @@ id of 0, the directory could look like:: -rw-------. 1 ceph ceph 10 Oct 20 13:05 type -rw-------. 1 ceph ceph 2 Oct 20 13:05 whoami -In the above case, a device was used for ``block`` so ``ceph-volume`` create -a volume group and a logical volume using the following convention: +In the above case, a device was used for ``block``, so ``ceph-volume`` created +a volume group and a logical volume using the following conventions: -* volume group name: ``ceph-{cluster fsid}`` or if the vg exists already - ``ceph-{random uuid}`` +* volume group name: ``ceph-{cluster fsid}`` (or if the volume group already + exists: ``ceph-{random uuid}``) * logical volume name: ``osd-block-{osd_fsid}`` @@ -98,78 +103,100 @@ a volume group and a logical volume using the following convention: ``filestore`` ------------- -This is the OSD backend that allows preparation of logical volumes for -a :term:`filestore` objectstore OSD. 
+``Filestore`` is the OSD backend that prepares logical volumes for a +:term:`filestore`-backed object-store OSD. -It can use a logical volume for the OSD data and a physical device, a partition -or logical volume for the journal. A physical device will have a logical volume -created on it. A volume group will either be created or reused it its name begins -with ``ceph``. No special preparation is needed for these volumes other than -following the minimum size requirements for data and journal. -The CLI call looks like this of a basic standalone filestore OSD:: +``Filestore`` uses a logical volume to store OSD data and it uses +physical devices, partitions, or logical volumes to store the journal. If a +physical device is used to create a filestore backend, a logical volume will be +created on that physical device. If the provided volume group's name begins +with ``ceph``, it will be created if it does not yet exist and it will be +clobbered and reused if it already exists. No special preparation is needed for +these volumes, but be sure to meet the minimum size requirements for OSD data and +for the journal. - ceph-volume lvm prepare --filestore --data +Use the following command to create a basic filestore OSD: -To deploy file store with an external journal:: +.. prompt:: bash # - ceph-volume lvm prepare --filestore --data --journal + ceph-volume lvm prepare --filestore --data -For enabling :ref:`encryption `, the ``--dmcrypt`` flag is required:: +Use this command to deploy filestore with an external journal: - ceph-volume lvm prepare --filestore --dmcrypt --data --journal +.. prompt:: bash # -Both the journal and data block device can take three forms: + ceph-volume lvm prepare --filestore --data --journal + +Use this command to enable :ref:`encryption `, and note that the ``--dmcrypt`` flag is required: + +..
prompt:: bash # + + ceph-volume lvm prepare --filestore --dmcrypt --data --journal + +The data block device and the journal can each take one of three forms: * a physical block device * a partition on a physical block device * a logical volume -When using logical volumes the value *must* be of the format -``volume_group/logical_volume``. Since logical volume names -are not enforced for uniqueness, this prevents accidentally -choosing the wrong volume. +If you use a logical volume to deploy filestore, the value that you pass in the +command *must* be of the format ``volume_group/logical_volume_name``. Since logical +volume names are not enforced for uniqueness, using this format is an important +safeguard against accidentally choosing the wrong volume (and clobbering its data). -When using a partition, it *must* contain a ``PARTUUID``, that can be -discovered by ``blkid``. THis ensure it can later be identified correctly -regardless of the device name (or path). +If you use a partition to deploy filestore, the partition *must* contain a +``PARTUUID`` that can be discovered by ``blkid``. This ensures that the +partition can be identified correctly regardless of the device's name (or path). -For example: passing a logical volume for data and a partition ``/dev/sdc1`` for -the journal:: +For example, to use a logical volume for OSD data and a partition +(``/dev/sdc1``) for the journal, run a command of this form: - ceph-volume lvm prepare --filestore --data volume_group/lv_name --journal /dev/sdc1 +.. prompt:: bash # -Passing a bare device for data and a logical volume ias the journal:: + ceph-volume lvm prepare --filestore --data volume_group/logical_volume_name --journal /dev/sdc1 - ceph-volume lvm prepare --filestore --data /dev/sdc --journal volume_group/journal_lv +Or, to use a bare device for data and a logical volume for the journal: -A generated uuid is used to ask the cluster for a new OSD. 
These two pieces are -crucial for identifying an OSD and will later be used throughout the -:ref:`ceph-volume-lvm-activate` process. +.. prompt:: bash # + + ceph-volume lvm prepare --filestore --data /dev/sdc --journal volume_group/journal_lv + +A generated UUID is used when asking the cluster for a new OSD. These two +pieces of information (the OSD ID and the OSD UUID) are necessary for +identifying a given OSD and will later be used throughout the +:ref:`activation` process. The OSD data directory is created using the following convention:: /var/lib/ceph/osd/- -At this point the data volume is mounted at this location, and the journal -volume is linked:: +To link the journal volume to the mounted data volume, use this command: - ln -s /path/to/journal /var/lib/ceph/osd/-/journal +.. prompt:: bash # -The monmap is fetched using the bootstrap key from the OSD:: + ln -s /path/to/journal /var/lib/ceph/osd/-/journal - /usr/bin/ceph --cluster ceph --name client.bootstrap-osd - --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring - mon getmap -o /var/lib/ceph/osd/-/activate.monmap +To fetch the monmap by using the bootstrap key from the OSD, use this command: -``ceph-osd`` will be called to populate the OSD directory, that is already -mounted, re-using all the pieces of information from the initial steps:: +.. prompt:: bash # + + /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring + /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o + /var/lib/ceph/osd/-/activate.monmap + +To populate the OSD directory (which has already been mounted), use this ``ceph-osd`` command: +.. prompt:: bash # + + ceph-osd --cluster ceph --mkfs --mkkey -i \ --monmap + /var/lib/ceph/osd/-/activate.monmap --osd-data \ + /var/lib/ceph/osd/- --osd-journal + /var/lib/ceph/osd/-/journal \ --osd-uuid + --keyring /var/lib/ceph/osd/-/keyring \ --setuser ceph + --setgroup ceph + +All of the information from the previous steps is used in the above command. 
- ceph-osd --cluster ceph --mkfs --mkkey -i \ - --monmap /var/lib/ceph/osd/-/activate.monmap --osd-data \ - /var/lib/ceph/osd/- --osd-journal /var/lib/ceph/osd/-/journal \ - --osd-uuid --keyring /var/lib/ceph/osd/-/keyring \ - --setuser ceph --setgroup ceph .. _ceph-volume-lvm-partitions: diff --git a/ceph/doc/cephadm/adoption.rst b/ceph/doc/cephadm/adoption.rst index d43aa2492..78d1343eb 100644 --- a/ceph/doc/cephadm/adoption.rst +++ b/ceph/doc/cephadm/adoption.rst @@ -113,15 +113,15 @@ Adoption process ssh-copy-id -f -i ~/ceph.pub root@ .. note:: - It is also possible to import an existing ssh key. See - :ref:`ssh errors ` in the troubleshooting + It is also possible to import an existing SSH key. See + :ref:`SSH errors ` in the troubleshooting document for instructions that describe how to import existing - ssh keys. + SSH keys. .. note:: - It is also possible to have cephadm use a non-root user to ssh + It is also possible to have cephadm use a non-root user to SSH into cluster hosts. This user needs to have passwordless sudo access. - Use ``ceph cephadm set-user `` and copy the ssh key to that user. + Use ``ceph cephadm set-user `` and copy the SSH key to that user. See :ref:`cephadm-ssh-user` #. Tell cephadm which hosts to manage: diff --git a/ceph/doc/cephadm/compatibility.rst b/ceph/doc/cephadm/compatibility.rst index 7c75b7445..7d9c763bb 100644 --- a/ceph/doc/cephadm/compatibility.rst +++ b/ceph/doc/cephadm/compatibility.rst @@ -8,11 +8,10 @@ Compatibility and Stability Compatibility with Podman Versions ---------------------------------- -Podman and Ceph have different end-of-life strategies that -might make it challenging to find compatible Podman and Ceph -versions +Podman and Ceph have different end-of-life strategies. This means that care +must be taken in finding a version of Podman that is compatible with Ceph. 
-Those versions are expected to work: +These versions are expected to work: +-----------+---------------------------------------+ @@ -28,7 +27,13 @@ Those versions are expected to work: +-----------+-------+-------+-------+-------+-------+ .. warning:: - Only podman versions that are 2.0.0 and higher work with Ceph Pacific, with the exception of podman version 2.2.1, which does not work with Ceph Pacific. kubic stable is known to work with Ceph Pacific, but it must be run with a newer kernel. + + To use Podman with Ceph Pacific, you must use **a version of Podman that + is 2.0.0 or higher**. However, **Podman version 2.2.1 does not work with + Ceph Pacific**. + + "Kubic stable" is known to work with Ceph Pacific, but it must be run + with a newer kernel. .. _cephadm-stability: @@ -36,19 +41,18 @@ Those versions are expected to work: Stability --------- -Cephadm is actively in development. Please be aware that some -functionality is still rough around the edges. Especially the -following components are working with cephadm, but the -documentation is not as complete as we would like, and there may be some -changes in the near future: +Cephadm is under development. Some functionality is incomplete. Be aware +that some of the components of Ceph may not work perfectly with cephadm. +These include: - RGW -Cephadm support for the following features is still under development and may see breaking -changes in future releases: +Cephadm support remains under development for the following features: - Ingress - Cephadm exporter daemon - cephfs-mirror -In case you encounter issues, see also :ref:`cephadm-pause`. +If a cephadm command fails or a service stops running properly, see +:ref:`cephadm-pause` for instructions on how to pause the Ceph cluster's +background activity and how to disable cephadm. 
diff --git a/ceph/doc/cephadm/host-management.rst b/ceph/doc/cephadm/host-management.rst index db99e47d1..df9525ca8 100644 --- a/ceph/doc/cephadm/host-management.rst +++ b/ceph/doc/cephadm/host-management.rst @@ -4,17 +4,26 @@ Host Management =============== -To list hosts associated with the cluster: +Listing Hosts +============= + +Run a command of this form to list hosts associated with the cluster: .. prompt:: bash # - ceph orch host ls [--format yaml] [--host-pattern ] [--label