From d500a7f9ffe563584e4f587fdc044fe368cf567e Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Mon, 5 Jul 2021 19:42:40 +0200
Subject: [PATCH] import preliminary last stable release 14.2.22

Signed-off-by: Thomas Lamprecht
---
 ceph/.gitmodules | 3 +
 ceph/CMakeLists.txt | 3 +-
 ceph/PendingReleaseNotes | 15 +-
 ceph/alpine/APKBUILD | 6 +-
 ceph/ceph.spec | 15 +-
 ceph/ceph.spec.in | 9 +-
 ceph/changelog.upstream | 16 +-
 ceph/cmake/modules/BuildBoost.cmake | 1 +
 ceph/cmake/modules/BuildZstd.cmake | 22 +
 ceph/cmake/modules/FindZstd.cmake | 51 +
 ceph/debian/ceph-common.postinst | 4 +-
 ceph/debian/control | 37 +-
 ceph/debian/radosgw.install | 2 +
 ceph/do_cmake.sh | 2 +-
 ceph/doc/man/8/ceph-volume.rst | 67 +-
 ceph/doc/man/8/ceph.rst | 9 +-
 .../rados/configuration/mon-config-ref.rst | 8 +
 ceph/doc/rados/configuration/msgr2.rst | 2 +-
 .../configuration/network-config-ref.rst | 30 +-
 ceph/install-deps.sh | 20 +-
 ceph/make-dist | 18 +-
 .../grafana/dashboards/host-details.json | 38 +-
 .../grafana/dashboards/hosts-overview.json | 4 +-
 .../dashboards/radosgw-sync-overview.json | 440 +
 .../prometheus/alerts/test_alerts.yml | 769 ++
 .../tasks/cfuse_workunit_suites_ffsb.yaml | 1 +
 ceph/qa/rbd/krbd_blkroset.t | 24 +-
 ceph/qa/standalone/crush/crush-classes.sh | 28 +
 ceph/qa/standalone/misc/ok-to-stop.sh | 7 +
 .../fs/basic_functional/tasks/volumes.yaml | 5 +-
 .../tasks/3-compat_client/mimic.yaml | 2 +
 .../tasks/4-compat_client.yaml | 2 +
 .../tasks/kclient_workunit_suites_ffsb.yaml | 3 +
 .../kclient_workunit_suites_ffsb.yaml | 1 +
 .../nautilus-client-x/.qa | 1 +
 .../nautilus-client-x/rbd/% | 0
 .../nautilus-client-x/rbd/.qa | 1 +
 .../nautilus-client-x/rbd/0-cluster/+ | 0
 .../nautilus-client-x/rbd/0-cluster/.qa | 1 +
 .../rbd/0-cluster/openstack.yaml | 4 +
 .../rbd/0-cluster/start.yaml | 21 +
 .../nautilus-client-x/rbd/1-install/.qa | 1 +
 .../rbd/1-install/nautilus-client-x.yaml | 11 +
 .../nautilus-client-x/rbd/2-features/.qa | 1 +
 .../rbd/2-features/defaults.yaml | 6 +
 .../rbd/2-features/layering.yaml | 6 +
 .../nautilus-client-x/rbd/3-workload/.qa | 1 +
 .../3-workload/rbd_notification_tests.yaml | 34 +
 .../nautilus-client-x/rbd/supported/.qa | 1 +
 .../rbd/supported/ubuntu_18.04.yaml | 1 +
 .../point-to-point-upgrade.yaml | 18 +-
 .../1-ceph-install/nautilus.yaml | 6 +-
 .../7-final-workload/rbd-python.yaml | 2 +-
 ceph/qa/tasks/ceph_manager.py | 24 +-
 ceph/qa/tasks/cephfs/cephfs_test_case.py | 3 +
 ceph/qa/tasks/cephfs/test_scrub_checks.py | 112 +-
 ceph/qa/tasks/cephfs/test_volume_client.py | 3 +
 ceph/qa/tasks/cephfs/test_volumes.py | 42 +
 ceph/qa/tasks/mgr/dashboard/test_rgw.py | 10 +
 ceph/qa/tasks/mgr/test_progress.py | 25 +-
 ceph/qa/tasks/userdata_setup.yaml | 2 +
 ceph/qa/tasks/vstart_runner.py | 5 +-
 ceph/qa/workunits/rbd/cli_generic.sh | 145 +-
 ceph/qa/workunits/rgw/test_rgw_orphan_list.sh | 20 +-
 ceph/run-make-check.sh | 17 +-
 ceph/src/.git_version | 4 +-
 ceph/src/CMakeLists.txt | 8 +-
 ceph/src/ceph-volume/ceph_volume/api/lvm.py | 12 +
 .../ceph_volume/devices/lvm/batch.py | 3 +-
 .../ceph_volume/devices/lvm/main.py | 4 +
 .../ceph_volume/devices/lvm/migrate.py | 674 ++
 .../tests/devices/lvm/test_migrate.py | 1504 ++++
 ceph/src/ceph-volume/ceph_volume/util/disk.py | 8 +-
 ceph/src/ceph.in | 25 +-
 ceph/src/ceph_mon.cc | 8 +
 ceph/src/client/Client.cc | 188 +-
 ceph/src/client/Client.h | 5 +-
 ceph/src/client/Inode.h | 2 +-
 ceph/src/cls/CMakeLists.txt | 4 +
 ceph/src/cls/rgw/cls_rgw.cc | 88 +-
 ceph/src/common/AsyncOpTracker.h | 22 +-
 ceph/src/common/CMakeLists.txt | 2 +-
 ceph/src/common/LogClient.cc | 17 +
 ceph/src/common/LogClient.h | 5 +-
 ceph/src/common/async/yield_context.h | 17 +-
 ceph/src/common/buffer.cc | 8 +-
 ceph/src/common/config.cc | 3 -
 ceph/src/common/config_values.h | 1 -
 ceph/src/common/ipaddr.cc | 118 +-
 ceph/src/common/legacy_config_opts.h | 1 +
 ceph/src/common/options.cc | 23 +-
 ceph/src/common/pick_address.cc | 308 +-
 ceph/src/common/pick_address.h | 14 +
 ceph/src/compressor/zstd/CMakeLists.txt | 39 +-
 ceph/src/compressor/zstd/ZstdCompressor.h | 2 +-
 ceph/src/crush/CrushLocation.cc | 1 -
 ceph/src/crush/CrushWrapper.cc | 12 +
 ceph/src/global/global_init.cc | 5 -
 ceph/src/global/signal_handler.h | 4 +-
 ceph/src/include/ceph_fs.h | 12 +-
 ceph/src/include/cephfs/libcephfs.h | 15 +
 ceph/src/include/ipaddr.h | 15 +-
 ceph/src/include/mempool.h | 2 +-
 ceph/src/libcephfs.cc | 17 +-
 ceph/src/librados/RadosClient.cc | 3 +-
 ceph/src/librbd/ObjectMap.cc | 12 +-
 ceph/src/librbd/ObjectMap.h | 3 +
 ceph/src/librbd/api/Trash.cc | 189 +-
 ceph/src/librbd/deep_copy/ImageCopyRequest.cc | 7 +-
 .../src/librbd/deep_copy/ObjectCopyRequest.cc | 13 +-
 ceph/src/librbd/deep_copy/ObjectCopyRequest.h | 8 +-
 ceph/src/librbd/deep_copy/Types.h | 5 +
 ceph/src/librbd/io/CopyupRequest.cc | 7 +-
 ceph/src/librbd/operation/MigrateRequest.cc | 7 +-
 ceph/src/librbd/trash/MoveRequest.cc | 5 +-
 ceph/src/mds/CDir.cc | 44 +-
 ceph/src/mds/CDir.h | 5 +-
 ceph/src/mds/CInode.h | 2 +-
 ceph/src/mds/DamageTable.cc | 7 +-
 ceph/src/mds/MDCache.cc | 139 +-
 ceph/src/mds/MDCache.h | 3 +-
 ceph/src/mds/MDSRank.cc | 15 +-
 ceph/src/mds/MDSRank.h | 3 +-
 ceph/src/mds/ScrubStack.h | 4 +
 ceph/src/mds/SnapRealm.cc | 2 +-
 ceph/src/mds/SnapServer.h | 2 +-
 ceph/src/mds/mdstypes.h | 14 +-
 ceph/src/mgr/ActivePyModules.cc | 490 +-
 ceph/src/mgr/ActivePyModules.h | 1 +
 ceph/src/mgr/CMakeLists.txt | 1 +
 ceph/src/mgr/ClusterState.h | 24 +-
 ceph/src/mgr/DaemonHealthMetricCollector.cc | 24 -
 ceph/src/mgr/DaemonHealthMetricCollector.h | 2 +-
 ceph/src/mgr/DaemonKey.cc | 35 +
 ceph/src/mgr/DaemonKey.h | 24 +
 ceph/src/mgr/DaemonServer.cc | 320 +-
 ceph/src/mgr/DaemonServer.h | 78 +-
 ceph/src/mgr/DaemonState.cc | 26 +-
 ceph/src/mgr/DaemonState.h | 8 +-
 ceph/src/mgr/Mgr.cc | 54 +-
 ceph/src/mgr/MgrClient.cc | 1 +
 ceph/src/mgr/MgrCommands.h | 3 +-
 ceph/src/mgr/PyModule.cc | 8 +-
 ceph/src/mon/ConfigMap.h | 2 +
 ceph/src/mon/ConfigMonitor.cc | 6 +-
 ceph/src/mon/MonClient.cc | 21 +-
 ceph/src/mon/MonClient.h | 14 +-
 ceph/src/mon/Monitor.cc | 8 +-
 ceph/src/mon/OSDMonitor.cc | 30 +-
 ceph/src/mon/OSDMonitor.h | 1 +
 ceph/src/mon/PaxosService.cc | 35 +-
 ceph/src/msg/async/ProtocolV2.cc | 2 +-
 ceph/src/msg/async/Stack.cc | 4 +-
 ceph/src/msg/msg_types.cc | 8 +
 ceph/src/msg/msg_types.h | 1 +
 ceph/src/os/bluestore/AvlAllocator.cc | 43 +-
 ceph/src/os/bluestore/BlueFS.cc | 16 +-
 ceph/src/os/bluestore/BlueStore.cc | 57 +-
 ceph/src/os/bluestore/BlueStore.h | 30 +-
 ceph/src/os/bluestore/HybridAllocator.cc | 2 +
 ceph/src/os/bluestore/StupidAllocator.cc | 4 +
 ceph/src/os/bluestore/bluestore_tool.cc | 38 +-
 .../src/os/filestore/BtrfsFileStoreBackend.cc | 12 +-
 ceph/src/os/filestore/FileStore.cc | 12 +-
 ceph/src/os/filestore/HashIndex.cc | 48 +-
 ceph/src/os/filestore/LFNIndex.cc | 28 +-
 ceph/src/osd/OSD.cc | 15 +-
 ceph/src/osd/OSDMap.cc | 2 +-
 ceph/src/osd/PG.cc | 24 +-
 ceph/src/pybind/ceph_daemon.py | 9 +-
 ceph/src/pybind/ceph_volume_client.py | 2 +-
 ceph/src/pybind/cephfs/cephfs.pyx | 25 +-
 ceph/src/pybind/cephfs/setup.py | 49 +-
 ceph/src/pybind/mgr/balancer/module.py | 7 +-
 ceph/src/pybind/mgr/dashboard/.pylintrc | 4 +-
 ceph/src/pybind/mgr/dashboard/CMakeLists.txt | 2 +-
 ceph/src/pybind/mgr/dashboard/__init__.py | 33 +-
 .../mgr/dashboard/controllers/cephfs.py | 4 +-
 .../controllers/cluster_configuration.py | 3 +
 .../pybind/mgr/dashboard/controllers/docs.py | 32 +-
 .../pybind/mgr/dashboard/controllers/iscsi.py | 4 +-
 .../mgr/dashboard/controllers/nfsganesha.py | 2 +-
 .../pybind/mgr/dashboard/controllers/rbd.py | 33 +-
 .../dashboard/controllers/rbd_mirroring.py | 2 +-
 .../pybind/mgr/dashboard/controllers/rgw.py | 12 +
 ceph/src/pybind/mgr/dashboard/exceptions.py | 2 +-
 .../dist/en-US/2.22b12bf9358f95829e55.js | 1 -
 .../dist/en-US/2.44961b8d762dc0e4cac6.js | 1 +
 .../dist/en-US/6.db2c4fdf5b9136aa0756.js | 1 +
 .../dist/en-US/6.f054f1f260c137d64363.js | 1 -
 .../dist/en-US/7.76846491986b0ea6c0bd.js | 1 -
 .../dist/en-US/7.e3a7bd88bd1897000455.js | 1 +
 .../dashboard/frontend/dist/en-US/index.html | 5 +-
 ...d1b79f.js => main.7b0f89a21c505f20dbd6.js} | 2 +-
 ...c51.js => runtime.78a4cc7309d6fb4c4558.js} | 2 +-
 .../dashboard/frontend/src/app/app.module.ts | 5 -
 .../rbd-details/rbd-details.component.html | 6 +-
 .../app/ceph/block/rbd-form/rbd-form.model.ts | 3 +
 .../block/rbd-list/rbd-list.component.html | 22 +
 .../block/rbd-list/rbd-list.component.scss | 5 +
 .../block/rbd-list/rbd-list.component.spec.ts | 26 +
 .../ceph/block/rbd-list/rbd-list.component.ts | 37 +-
 .../rgw-bucket-list.component.spec.ts | 12 +-
 .../rgw-bucket-list.component.ts | 20 +-
 .../rgw-daemon-list.component.html | 9 +
 .../rgw-daemon-list.component.spec.ts | 40 +-
 .../rgw-daemon-list.component.ts | 20 +-
 .../dashboard-help.component.html | 7 +-
 .../dashboard-help.component.ts | 8 +-
 .../navigation/navigation.component.html | 4 +-
 .../app/shared/api/rgw-site.service.spec.ts | 40 +
 .../src/app/shared/api/rgw-site.service.ts | 24 +
 .../app/shared/models/prometheus-alerts.ts | 3 +-
 .../prometheus-alert-formatter.spec.ts | 6 +-
 .../services/prometheus-alert-formatter.ts | 4 +-
 .../services/prometheus-alert.service.spec.ts | 22 +
 .../services/prometheus-alert.service.ts | 6 +
 .../mgr/dashboard/frontend/src/index.html | 3 +-
 .../frontend/src/testing/unit-test-helper.ts | 4 +-
 ceph/src/pybind/mgr/dashboard/module.py | 10 +-
 .../pybind/mgr/dashboard/requirements-py3.txt | 4 +-
 .../mgr/dashboard/services/access_control.py | 35 +-
 .../mgr/dashboard/services/ceph_service.py | 2 +-
 .../pybind/mgr/dashboard/services/cephfs.py | 2 +-
 .../mgr/dashboard/services/exception.py | 4 +-
 ceph/src/pybind/mgr/dashboard/services/rbd.py | 2 +-
 .../mgr/dashboard/services/rgw_client.py | 12 +-
 .../dashboard/tests/test_access_control.py | 83 +-
 .../mgr/dashboard/tests/test_exceptions.py | 2 +-
 .../pybind/mgr/dashboard/tests/test_rbd.py | 107 +
 .../mgr/dashboard/tests/test_rgw_client.py | 22 +
 ceph/src/pybind/mgr/dashboard/tools.py | 20 +
 ceph/src/pybind/mgr/mgr_module.py | 8 +
 ceph/src/pybind/mgr/progress/module.py | 12 +-
 ceph/src/pybind/mgr/prometheus/module.py | 28 +
 ceph/src/pybind/mgr/telemetry/module.py | 6 +-
 .../src/pybind/mgr/volumes/fs/async_cloner.py | 3 +-
 ceph/src/pybind/mgr/volumes/fs/async_job.py | 47 +-
 ceph/src/pybind/mgr/volumes/fs/volume.py | 8 +-
 ceph/src/pybind/rados/rados.pyx | 11 +-
 ceph/src/pybind/rados/setup.py | 52 +-
 ceph/src/pybind/rbd/setup.py | 49 +-
 ceph/src/pybind/rgw/setup.py | 48 +-
 ceph/src/rgw/CMakeLists.txt | 17 +-
 ceph/src/rgw/rgw-gap-list | 379 +
 ceph/src/rgw/rgw-gap-list-comparator | 119 +
 ceph/src/rgw/rgw-orphan-list | 12 +-
 ceph/src/rgw/rgw_admin.cc | 13 +
 ceph/src/rgw/rgw_asio_frontend.cc | 26 +-
 ceph/src/rgw/rgw_auth.cc | 7 +
 ceph/src/rgw/rgw_auth_keystone.cc | 198 +-
 ceph/src/rgw/rgw_auth_keystone.h | 72 +-
 ceph/src/rgw/rgw_auth_s3.h | 4 +-
 ceph/src/rgw/rgw_common.cc | 33 +
 ceph/src/rgw/rgw_common.h | 3 +
 ceph/src/rgw/rgw_cors.cc | 5 +-
 ceph/src/rgw/rgw_gc.cc | 8 +-
 ceph/src/rgw/rgw_http_client.cc | 22 +-
 ceph/src/rgw/rgw_http_client.h | 8 +
 ceph/src/rgw/rgw_op.cc | 133 +-
 ceph/src/rgw/rgw_op.h | 7 +-
 ceph/src/rgw/rgw_orphan.cc | 63 +-
 ceph/src/rgw/rgw_orphan.h | 20 +-
 ceph/src/rgw/rgw_reshard.cc | 15 +-
 ceph/src/rgw/rgw_rest_s3.cc | 6 +
 ceph/src/rgw/rgw_rest_swift.cc | 10 +-
 ceph/src/script/run-make.sh | 8 +
 ceph/src/spawn/.gitmodules | 3 +
 ceph/src/spawn/CMakeLists.txt | 28 +
 ceph/src/spawn/LICENSE_1_0.txt | 23 +
 ceph/src/spawn/README.md | 8 +
 .../spawn/detail/is_stack_allocator.hpp | 30 +
 ceph/src/spawn/include/spawn/detail/net.hpp | 38 +
 ceph/src/spawn/include/spawn/impl/spawn.hpp | 420 +
 ceph/src/spawn/include/spawn/spawn.hpp | 292 +
 ceph/src/spawn/test/CMakeLists.txt | 21 +
 ceph/src/spawn/test/dependency/CMakeLists.txt | 1 +
 .../test/dependency/googletest/.clang-format | 4 +
 .../test/dependency/googletest/.travis.yml | 73 +
 .../test/dependency/googletest/BUILD.bazel | 179 +
 .../test/dependency/googletest/CMakeLists.txt | 36 +
 .../dependency/googletest/CONTRIBUTING.md | 142 +
 .../spawn/test/dependency/googletest/LICENSE | 28 +
 .../test/dependency/googletest/README.md | 134 +
 .../test/dependency/googletest/WORKSPACE | 23 +
 .../test/dependency/googletest/appveyor.yml | 154 +
 .../googletest/ci/build-linux-bazel.sh | 37 +
 .../googletest/ci/build-platformio.sh | 2 +
 .../dependency/googletest/ci/env-linux.sh | 41 +
 .../test/dependency/googletest/ci/env-osx.sh | 47 +
 .../googletest/ci/get-nprocessors.sh | 48 +
 .../dependency/googletest/ci/install-linux.sh | 49 +
 .../dependency/googletest/ci/install-osx.sh | 40 +
 .../googletest/ci/install-platformio.sh | 5 +
 .../dependency/googletest/ci/log-config.sh | 51 +
 .../test/dependency/googletest/ci/travis.sh | 44 +
 .../googletest/googlemock/CMakeLists.txt | 233 +
 .../googletest/googlemock/CONTRIBUTORS | 40 +
 .../dependency/googletest/googlemock/LICENSE | 28 +
 .../googletest/googlemock/README.md | 44 +
 .../googletest/googlemock/cmake/gmock.pc.in | 11 +
 .../googlemock/cmake/gmock_main.pc.in | 11 +
 .../googletest/googlemock/docs/cheat_sheet.md | 770 ++
 .../googletest/googlemock/docs/cook_book.md | 4269 ++++++++++
 .../googletest/googlemock/docs/for_dummies.md | 700 ++
 .../googletest/googlemock/docs/gmock_faq.md | 396 +
 .../googlemock/include/gmock/gmock-actions.h | 1141 +++
 .../include/gmock/gmock-cardinalities.h | 153 +
 .../include/gmock/gmock-function-mocker.h | 253 +
 .../include/gmock/gmock-generated-actions.h | 1884 +++++
 .../gmock/gmock-generated-actions.h.pump | 627 ++
 .../gmock/gmock-generated-function-mockers.h | 752 ++
 .../gmock-generated-function-mockers.h.pump | 227 +
 .../include/gmock/gmock-generated-matchers.h | 1097 +++
 .../gmock/gmock-generated-matchers.h.pump | 346 +
 .../googlemock/include/gmock/gmock-matchers.h | 4567 ++++++++++
 .../include/gmock/gmock-more-actions.h | 162 +
 .../include/gmock/gmock-more-matchers.h | 92 +
 .../include/gmock/gmock-nice-strict.h | 215 +
 .../include/gmock/gmock-spec-builders.h | 1982 +++++
 .../googlemock/include/gmock/gmock.h | 101 +
 .../include/gmock/internal/custom/README.md | 16 +
 .../internal/custom/gmock-generated-actions.h | 10 +
 .../custom/gmock-generated-actions.h.pump | 12 +
 .../gmock/internal/custom/gmock-matchers.h | 36 +
 .../gmock/internal/custom/gmock-port.h | 39 +
 .../gmock/internal/gmock-internal-utils.h | 518 ++
 .../include/gmock/internal/gmock-port.h | 87 +
 .../include/gmock/internal/gmock-pp.h | 317 +
 .../googlemock/scripts/fuse_gmock_files.py | 240 +
 .../googlemock/scripts/generator/LICENSE | 203 +
 .../googlemock/scripts/generator/README | 34 +
 .../scripts/generator/README.cppclean | 115 +
 .../scripts/generator/cpp/__init__.py | 0
 .../googlemock/scripts/generator/cpp/ast.py | 1736 ++++
 .../scripts/generator/cpp/gmock_class.py | 227 +
 .../scripts/generator/cpp/gmock_class_test.py | 466 +
 .../scripts/generator/cpp/keywords.py | 59 +
 .../scripts/generator/cpp/tokenize.py | 287 +
 .../googlemock/scripts/generator/cpp/utils.py | 41 +
 .../googlemock/scripts/generator/gmock_gen.py | 31 +
 .../googlemock/scripts/gmock-config.in | 303 +
 .../googlemock/scripts/gmock_doctor.py | 640 ++
 .../googletest/googlemock/scripts/upload.py | 1387 +++
 .../googlemock/scripts/upload_gmock.py | 78 +
 .../googletest/googlemock/src/gmock-all.cc | 46 +
 .../googlemock/src/gmock-cardinalities.cc | 155 +
 .../googlemock/src/gmock-internal-utils.cc | 200 +
 .../googlemock/src/gmock-matchers.cc | 462 +
 .../googlemock/src/gmock-spec-builders.cc | 887 ++
 .../googletest/googlemock/src/gmock.cc | 213 +
 .../googletest/googlemock/src/gmock_main.cc | 65 +
 .../googletest/googlemock/test/BUILD.bazel | 110 +
 .../googlemock/test/gmock-actions_test.cc | 1445 ++++
 .../test/gmock-cardinalities_test.cc | 427 +
 .../test/gmock-function-mocker_nc.cc | 16 +
 .../test/gmock-function-mocker_nc_test.py | 43 +
 .../test/gmock-function-mocker_test.cc | 660 +
 .../test/gmock-generated-actions_test.cc | 1064 +++
 .../gmock-generated-function-mockers_test.cc | 659 ++
 .../test/gmock-generated-matchers_test.cc | 1324 +++
 .../test/gmock-internal-utils_test.cc | 740 ++
 .../googlemock/test/gmock-matchers_test.cc | 6792 +++++++++++++++
 .../test/gmock-more-actions_test.cc | 699 ++
 .../googlemock/test/gmock-nice-strict_test.cc | 500 ++
 .../googlemock/test/gmock-port_test.cc | 42 +
 .../googlemock/test/gmock-pp-string_test.cc | 206 +
 .../googlemock/test/gmock-pp_test.cc | 73 +
 .../test/gmock-spec-builders_test.cc | 2773 ++++++
 .../googlemock/test/gmock_all_test.cc | 49 +
 .../googlemock/test/gmock_ex_test.cc | 80 +
 .../googlemock/test/gmock_leak_test.py | 104 +
 .../googlemock/test/gmock_leak_test_.cc | 99 +
 .../googlemock/test/gmock_link2_test.cc | 39 +
 .../googlemock/test/gmock_link_test.cc | 39 +
 .../googlemock/test/gmock_link_test.h | 690 ++
 .../googlemock/test/gmock_output_test.py | 183 +
 .../googlemock/test/gmock_output_test_.cc | 309 +
 .../test/gmock_output_test_golden.txt | 317 +
 .../googlemock/test/gmock_stress_test.cc | 240 +
 .../googletest/googlemock/test/gmock_test.cc | 181 +
 .../googlemock/test/gmock_test_utils.py | 108 +
 .../googletest/googletest/CMakeLists.txt | 328 +
 .../googletest/googletest/CONTRIBUTORS | 37 +
 .../dependency/googletest/googletest/LICENSE | 28 +
 .../googletest/googletest/README.md | 244 +
 .../googletest/cmake/Config.cmake.in | 9 +
 .../googletest/googletest/cmake/gtest.pc.in | 10 +
 .../googletest/cmake/gtest_main.pc.in | 11 +
 .../googletest/cmake/internal_utils.cmake | 358 +
 .../googletest/cmake/libgtest.la.in | 21 +
 .../googletest/googletest/docs/advanced.md | 2566 ++++++
 .../googletest/googletest/docs/faq.md | 753 ++
 .../googletest/googletest/docs/pkgconfig.md | 141 +
 .../googletest/googletest/docs/primer.md | 568 ++
 .../googletest/googletest/docs/pump_manual.md | 190 +
 .../googletest/googletest/docs/samples.md | 22 +
 .../include/gtest/gtest-death-test.h | 343 +
 .../googletest/include/gtest/gtest-matchers.h | 750 ++
 .../googletest/include/gtest/gtest-message.h | 218 +
 .../include/gtest/gtest-param-test.h | 503 ++
 .../googletest/include/gtest/gtest-printers.h | 928 ++
 .../googletest/include/gtest/gtest-spi.h | 238 +
 .../include/gtest/gtest-test-part.h | 184 +
 .../include/gtest/gtest-typed-test.h | 330 +
 .../googletest/include/gtest/gtest.h | 2477 ++++++
 .../include/gtest/gtest_pred_impl.h | 359 +
 .../googletest/include/gtest/gtest_prod.h | 61 +
 .../include/gtest/internal/custom/README.md | 56 +
 .../gtest/internal/custom/gtest-port.h | 37 +
 .../gtest/internal/custom/gtest-printers.h | 42 +
 .../include/gtest/internal/custom/gtest.h | 37 +
 .../internal/gtest-death-test-internal.h | 304 +
 .../include/gtest/internal/gtest-filepath.h | 211 +
 .../include/gtest/internal/gtest-internal.h | 1400 +++
 .../include/gtest/internal/gtest-param-util.h | 883 ++
 .../include/gtest/internal/gtest-port-arch.h | 107 +
 .../include/gtest/internal/gtest-port.h | 2237 +++++
 .../include/gtest/internal/gtest-string.h | 170 +
 .../include/gtest/internal/gtest-type-util.h | 3335 ++++++++
 .../gtest/internal/gtest-type-util.h.pump | 302 +
 .../googletest/samples/prime_tables.h | 126 +
 .../googletest/googletest/samples/sample1.cc | 66 +
 .../googletest/googletest/samples/sample1.h | 41 +
 .../googletest/samples/sample10_unittest.cc | 139 +
 .../googletest/samples/sample1_unittest.cc | 151 +
 .../googletest/googletest/samples/sample2.cc | 54 +
 .../googletest/googletest/samples/sample2.h | 81 +
 .../googletest/samples/sample2_unittest.cc | 107 +
 .../googletest/samples/sample3-inl.h | 172 +
 .../googletest/samples/sample3_unittest.cc | 149 +
 .../googletest/googletest/samples/sample4.cc | 54 +
 .../googletest/googletest/samples/sample4.h | 53 +
 .../googletest/samples/sample4_unittest.cc | 53 +
 .../googletest/samples/sample5_unittest.cc | 196 +
 .../googletest/samples/sample6_unittest.cc | 224 +
 .../googletest/samples/sample7_unittest.cc | 117 +
 .../googletest/samples/sample8_unittest.cc | 154 +
 .../googletest/samples/sample9_unittest.cc | 156 +
 .../googletest/googletest/scripts/common.py | 83 +
 .../googletest/scripts/fuse_gtest_files.py | 253 +
 .../googletest/scripts/gen_gtest_pred_impl.py | 730 +
 .../googletest/scripts/gtest-config.in | 274 +
 .../googletest/googletest/scripts/pump.py | 855 ++
 .../googletest/scripts/release_docs.py | 158 +
 .../googletest/scripts/test/Makefile | 59 +
 .../googletest/googletest/scripts/upload.py | 1387 +++
 .../googletest/scripts/upload_gtest.py | 78 +
 .../googletest/googletest/src/gtest-all.cc | 48 +
 .../googletest/src/gtest-death-test.cc | 1653 ++++
 .../googletest/src/gtest-filepath.cc | 379 +
 .../googletest/src/gtest-internal-inl.h | 1210 +++
 .../googletest/src/gtest-matchers.cc | 97 +
 .../googletest/googletest/src/gtest-port.cc | 1402 +++
 .../googletest/src/gtest-printers.cc | 442 +
 .../googletest/src/gtest-test-part.cc | 104 +
 .../googletest/src/gtest-typed-test.cc | 118 +
 .../googletest/googletest/src/gtest.cc | 6181 ++++++++++++++
 .../googletest/googletest/src/gtest_main.cc | 47 +
 .../googletest/googletest/test/BUILD.bazel | 521 ++
 .../googletest-break-on-failure-unittest.py | 208 +
 .../googletest-break-on-failure-unittest_.cc | 86 +
 .../test/googletest-catch-exceptions-test.py | 236 +
 .../test/googletest-catch-exceptions-test_.cc | 293 +
 .../googletest/test/googletest-color-test.py | 127 +
 .../googletest/test/googletest-color-test_.cc | 62 +
 .../test/googletest-death-test-test.cc | 1516 ++++
 .../test/googletest-death-test_ex_test.cc | 92 +
 .../test/googletest-env-var-test.py | 117 +
 .../test/googletest-env-var-test_.cc | 122 +
 .../test/googletest-filepath-test.cc | 649 ++
 .../test/googletest-filter-unittest.py | 639 +
 .../test/googletest-filter-unittest_.cc | 137 +
 .../test/googletest-json-outfiles-test.py | 191 +
 .../test/googletest-json-output-unittest.py | 778 +
 .../test/googletest-list-tests-unittest.py | 205 +
 .../test/googletest-list-tests-unittest_.cc | 156 +
 .../test/googletest-listener-test.cc | 518 +
 .../test/googletest-message-test.cc | 158 +
 .../test/googletest-options-test.cc | 216 +
 .../googletest-output-test-golden-lin.txt | 1140 +++
 .../googletest/test/googletest-output-test.py | 346 +
 .../test/googletest-output-test_.cc | 1157 +++
 ...oogletest-param-test-invalid-name1-test.py | 63 +
 ...ogletest-param-test-invalid-name1-test_.cc | 50 +
 ...oogletest-param-test-invalid-name2-test.py | 62 +
 ...ogletest-param-test-invalid-name2-test_.cc | 55 +
 .../test/googletest-param-test-test.cc | 1055 +++
 .../test/googletest-param-test-test.h | 51 +
 .../test/googletest-param-test2-test.cc | 61 +
 .../googletest/test/googletest-port-test.cc | 1272 +++
 .../test/googletest-printers-test.cc | 1620 ++++
 .../test/googletest-shuffle-test.py | 323 +
 .../test/googletest-shuffle-test_.cc | 101 +
 .../test/googletest-test-part-test.cc | 230 +
 .../googletest/test/googletest-test2_test.cc | 61 +
 .../test/googletest-throw-on-failure-test.py | 168 +
 .../test/googletest-throw-on-failure-test_.cc | 71 +
 .../test/googletest-uninitialized-test.py | 67 +
 .../test/googletest-uninitialized-test_.cc | 42 +
 .../googletest/test/gtest-typed-test2_test.cc | 44 +
 .../googletest/test/gtest-typed-test_test.cc | 462 +
 .../googletest/test/gtest-typed-test_test.h | 65 +
 .../test/gtest-unittest-api_test.cc | 340 +
 .../googletest/test/gtest_all_test.cc | 46 +
 .../test/gtest_assert_by_exception_test.cc | 116 +
 .../googletest/test/gtest_environment_test.cc | 188 +
 .../googletest/test/gtest_help_test.py | 170 +
 .../googletest/test/gtest_help_test_.cc | 45 +
 .../googletest/test/gtest_json_test_utils.py | 60 +
 .../test/gtest_list_output_unittest.py | 141 +
 .../test/gtest_list_output_unittest_.cc | 51 +
 .../googletest/test/gtest_main_unittest.cc | 44 +
 .../googletest/test/gtest_no_test_unittest.cc | 54 +
 .../test/gtest_pred_impl_unittest.cc | 2427 ++++++
 .../test/gtest_premature_exit_test.cc | 126 +
 .../googletest/test/gtest_prod_test.cc | 56 +
 .../googletest/test/gtest_repeat_test.cc | 233 +
 ...test_skip_environment_check_output_test.py | 54 +
 .../gtest_skip_in_environment_setup_test.cc | 49 +
 .../googletest/test/gtest_skip_test.cc | 55 +
 .../googletest/test/gtest_sole_header_test.cc | 56 +
 .../googletest/test/gtest_stress_test.cc | 248 +
 .../gtest_test_macro_stack_footprint_test.cc | 89 +
 .../googletest/test/gtest_test_utils.py | 313 +
 .../googletest/test/gtest_testbridge_test.py | 63 +
 .../googletest/test/gtest_testbridge_test_.cc | 43 +
 .../test/gtest_throw_on_failure_ex_test.cc | 90 +
 .../googletest/test/gtest_unittest.cc | 7519 +++++++++++++++++
 .../test/gtest_xml_outfile1_test_.cc | 43 +
 .../test/gtest_xml_outfile2_test_.cc | 43 +
 .../test/gtest_xml_outfiles_test.py | 135 +
 .../test/gtest_xml_output_unittest.py | 389 +
 .../test/gtest_xml_output_unittest_.cc | 188 +
 .../googletest/test/gtest_xml_test_utils.py | 196 +
 .../googletest/googletest/test/production.cc | 35 +
 .../googletest/googletest/test/production.h | 54 +
 .../test/dependency/googletest/library.json | 59 +
 .../test/dependency/googletest/platformio.ini | 31 +
 ceph/src/spawn/test/test_spawn.cc | 238 +
 ceph/src/test/CMakeLists.txt | 29 +-
 ceph/src/test/cli/radosgw-admin/help.t | 6 +
 ceph/src/test/cls_rgw/test_cls_rgw.cc | 10 +-
 ceph/src/test/fio/CMakeLists.txt | 8 +-
 ceph/src/test/fs/test_ino_release_cb.cc | 2 +-
 ceph/src/test/libcephfs/test.cc | 165 +-
 ceph/src/test/librados/CMakeLists.txt | 3 +
 ceph/src/test/librados/watch_notify.cc | 5 +-
 .../deep_copy/test_mock_ImageCopyRequest.cc | 2 +-
 .../deep_copy/test_mock_ObjectCopyRequest.cc | 2 +-
 .../test/librbd/io/test_mock_CopyupRequest.cc | 5 +-
 ceph/src/test/mon/CMakeLists.txt | 22 +-
 ceph/src/test/objectstore/Allocator_test.cc | 58 +
 ceph/src/test/objectstore/store_test.cc | 81 +-
 ceph/src/test/osd/TestOSDScrub.cc | 1 +
 ceph/src/test/osd/safe-to-destroy.sh | 6 +-
 ceph/src/test/pybind/test_ceph_argparse.py | 2 +-
 ceph/src/test/pybind/test_ceph_daemon.py | 2 +-
 ceph/src/test/pybind/test_cephfs.py | 28 +-
 .../rbd_mirror/test_mock_InstanceReplayer.cc | 14 +-
 ceph/src/test/rgw/CMakeLists.txt | 8 +-
 ceph/src/test/rgw/rgw_multi/tests.py | 26 +-
 ceph/src/test/rgw/test_rgw_reshard_wait.cc | 19 +-
 ceph/src/test/run-promtool-unittests.sh | 13 +
 ceph/src/test/test_ipaddr.cc | 133 +-
 ceph/src/test/test_mempool.cc | 17 +-
 ceph/src/tools/ceph_monstore_tool.cc | 23 +-
 ceph/src/tools/cephfs/DataScan.cc | 11 +-
 ceph/src/tools/monmaptool.cc | 3 +-
 ceph/src/tools/rbd/action/Trash.cc | 20 +-
 ceph/src/tools/rbd_mirror/ImageReplayer.cc | 40 +-
 ceph/src/tools/rbd_mirror/ImageReplayer.h | 14 +-
 ceph/src/tools/rbd_mirror/InstanceReplayer.cc | 32 +-
 ceph/src/vstart.sh | 1 +
 ceph/src/zstd/.circleci/config.yml | 169 +
 .../zstd/.circleci/images/primary/Dockerfile | 9 +
 ceph/src/zstd/.cirrus.yml | 11 +
 ceph/src/zstd/.gitattributes | 3 -
 .../zstd/.github/ISSUE_TEMPLATE/bug_report.md | 35 +
 .../.github/ISSUE_TEMPLATE/feature_request.md | 20 +
 ceph/src/zstd/.github/workflows/main.yml | 23 +
 ceph/src/zstd/.travis.yml | 317 +-
 ceph/src/zstd/{NEWS => CHANGELOG} | 221 +-
 ceph/src/zstd/CODE_OF_CONDUCT.md | 5 +
 ceph/src/zstd/CONTRIBUTING.md | 352 +-
 ceph/src/zstd/Makefile | 188 +-
 ceph/src/zstd/README.md | 166 +-
 ceph/src/zstd/TESTING.md | 6 +-
 ceph/src/zstd/appveyor.yml | 67 +-
 ceph/src/zstd/build/LICENSE | 0
 .../build/VS2008/fullbench/fullbench.vcproj | 52 +
 .../zstd/build/VS2008/fuzzer/fuzzer.vcproj | 64 +-
 ceph/src/zstd/build/VS2008/zstd/zstd.vcproj | 70 +-
 .../zstd/build/VS2008/zstdlib/zstdlib.vcproj | 56 +-
 .../zstd/build/VS2010/datagen/datagen.vcxproj | 3 +-
 .../fullbench-dll/fullbench-dll.vcxproj | 6 +-
 .../build/VS2010/fullbench/fullbench.vcxproj | 16 +
 .../zstd/build/VS2010/fuzzer/fuzzer.vcxproj | 24 +-
 .../VS2010/libzstd-dll/libzstd-dll.vcxproj | 23 +-
 .../zstd/build/VS2010/libzstd/libzstd.vcxproj | 43 +-
 ceph/src/zstd/build/VS2010/zstd/zstd.vcxproj | 29 +-
 ceph/src/zstd/build/VS_scripts/README.md | 44 +-
 .../zstd/build/VS_scripts/build.VS2017.cmd | 7 +
 .../VS_scripts/build.VS2017Community.cmd | 7 +
 .../VS_scripts/build.VS2017Enterprise.cmd | 7 +
 .../VS_scripts/build.VS2017Professional.cmd | 7 +
 .../zstd/build/VS_scripts/build.generic.cmd | 16 +-
 ceph/src/zstd/build/cmake/CMakeLists.txt | 206 +-
 .../AddZstdCompilationFlags.cmake | 32 +-
 .../build/cmake/CMakeModules/FindLibLZ4.cmake | 49 +
 .../CMakeModules/GetZstdLibraryVersion.cmake | 11 +-
 ceph/src/zstd/build/cmake/README.md | 104 +
 .../zstd/build/cmake/contrib/CMakeLists.txt | 6 +-
 .../cmake/contrib/gen_html/CMakeLists.txt | 24 +-
 .../build/cmake/contrib/pzstd/CMakeLists.txt | 34 +-
 ceph/src/zstd/build/cmake/lib/CMakeLists.txt | 232 +-
 .../build/cmake/lib/cmake_uninstall.cmake.in | 10 +-
 ceph/src/zstd/build/cmake/lib/pkgconfig.cmake | 2 +-
 .../zstd/build/cmake/programs/CMakeLists.txt | 176 +-
 .../src/zstd/build/cmake/tests/CMakeLists.txt | 85 +-
 ceph/src/zstd/build/cmake/zstdConfig.cmake | 1 +
 .../zstd/build/meson/GetZstdLibraryVersion.py | 39 +
 ceph/src/zstd/build/meson/InstallSymlink.py | 55 +
 ceph/src/zstd/build/meson/README.md | 38 +
 .../build/meson/contrib/gen_html/meson.build | 30 +
 ceph/src/zstd/build/meson/contrib/meson.build | 12 +
 .../build/meson/contrib/pzstd/meson.build | 24 +
 ceph/src/zstd/build/meson/lib/meson.build | 132 +
 ceph/src/zstd/build/meson/meson.build | 147 +
 ceph/src/zstd/build/meson/meson_options.txt | 36 +
 .../src/zstd/build/meson/programs/meson.build | 104 +
 ceph/src/zstd/build/meson/tests/meson.build | 206 +
 .../zstd/build/meson/tests/valgrindTest.py | 90 +
 ceph/src/zstd/circle.yml | 80 -
 .../contrib/adaptive-compression/Makefile | 76 -
 .../contrib/adaptive-compression/README.md | 91 -
 .../zstd/contrib/adaptive-compression/adapt.c | 1137 ---
 .../contrib/adaptive-compression/datagencli.c | 129 -
 .../adaptive-compression/test-correctness.sh | 252 -
 .../adaptive-compression/test-performance.sh | 59 -
 .../zstd/contrib/diagnose_corruption/Makefile | 35 +
 .../diagnose_corruption/check_flipped_bits.c | 400 +
 ceph/src/zstd/contrib/docker/Dockerfile | 20 +
 ceph/src/zstd/contrib/docker/README.md | 20 +
 ceph/src/zstd/contrib/gen_html/Makefile | 8 +-
 ceph/src/zstd/contrib/largeNbDicts/Makefile | 58 +
 ceph/src/zstd/contrib/largeNbDicts/README.md | 25 +
 .../zstd/contrib/largeNbDicts/largeNbDicts.c | 979 +++
 .../0002-lib-Add-zstd-modules.patch | 16 +-
 ...0006-squashfs-tools-Add-zstd-support.patch | 4 +-
 .../contrib/linux-kernel/lib/zstd/compress.c | 4 +-
 .../zstd/contrib/linux-kernel/lib/zstd/fse.h | 2 +-
 .../linux-kernel/lib/zstd/fse_compress.c | 4 +-
 .../zstd/contrib/linux-kernel/lib/zstd/huf.h | 2 +-
 .../test/include/linux/compiler.h | 6 +-
 .../contrib/long_distance_matching/Makefile | 36 -
 .../contrib/long_distance_matching/README.md | 102 -
 .../zstd/contrib/long_distance_matching/ldm.c | 857 --
 .../zstd/contrib/long_distance_matching/ldm.h | 197 -
 .../long_distance_matching/ldm_common.c | 109 -
 .../long_distance_matching/ldm_params.h | 12 -
 .../contrib/long_distance_matching/main.c | 269 -
 ceph/src/zstd/contrib/match_finders/README.md | 42 +
 .../zstd/contrib/match_finders/zstd_edist.c | 558 ++
 .../zstd/contrib/match_finders/zstd_edist.h | 70 +
 ceph/src/zstd/contrib/meson/README | 3 -
 ceph/src/zstd/contrib/meson/meson.build | 79 -
 ceph/src/zstd/contrib/meson/meson_options.txt | 2 -
 ceph/src/zstd/contrib/premake/premake4.lua | 6 +
 ceph/src/zstd/contrib/premake/zstd.lua | 80 +
 ceph/src/zstd/contrib/pzstd/Makefile | 12 +-
 ceph/src/zstd/contrib/pzstd/Options.cpp | 27 +-
 ceph/src/zstd/contrib/pzstd/Pzstd.cpp | 11 +-
 ceph/src/zstd/contrib/pzstd/utils/Range.h | 2 +-
 .../zstd/contrib/pzstd/utils/ResourcePool.h | 2 +-
 .../contrib/seekable_format/examples/Makefile | 17 +-
 .../examples/parallel_processing.c | 16 +-
 .../examples/seekable_compression.c | 7 +-
 .../examples/seekable_decompression.c | 6 +-
 .../examples/seekable_decompression_mem.c | 144 +
 .../contrib/seekable_format/zstd_seekable.h | 4 +-
 .../seekable_format/zstdseek_compress.c | 37 +-
 .../seekable_format/zstdseek_decompress.c | 55 +-
 .../zstd/contrib/single_file_libs/README.md | 33 +
 .../single_file_libs/build_decoder_test.sh | 55 +
 .../single_file_libs/build_library_test.sh | 61 +
 .../zstd/contrib/single_file_libs/combine.sh | 211 +
 .../create_single_file_decoder.sh | 14 +
 .../create_single_file_library.sh | 14 +
 .../single_file_libs/examples/README.md | 11 +
 .../single_file_libs/examples/emscripten.c | 340 +
 .../single_file_libs/examples/roundtrip.c | 83 +
 .../single_file_libs/examples/shell.html | 31 +
 .../single_file_libs/examples/simple.c | 75 +
 .../examples/testcard-dxt1.inl | 2731 ++
 .../examples/testcard-zstd.inl | 261 +
 .../single_file_libs/examples/testcard.png | Bin 0 -> 12675 bytes
 .../zstd/contrib/single_file_libs/zstd-in.c | 76 +
 .../contrib/single_file_libs/zstddeclib-in.c | 54 +
 ceph/src/zstd/contrib/snap/snapcraft.yaml | 28 +
 ceph/src/zstd/doc/README.md | 19 +-
 .../src/zstd/doc/educational_decoder/Makefile | 56 +-
 .../zstd/doc/educational_decoder/README.md | 7 +
 .../zstd/doc/educational_decoder/harness.c | 128 +-
 .../doc/educational_decoder/zstd_decompress.c | 295 +-
 .../doc/educational_decoder/zstd_decompress.h | 7 +-
 ceph/src/zstd/doc/images/CSpeed2.png | Bin 0 -> 73335 bytes
 ceph/src/zstd/doc/images/Cspeed4.png | Bin 71276 -> 0 bytes
 ceph/src/zstd/doc/images/DSpeed3.png | Bin 0 -> 27123 bytes
 ceph/src/zstd/doc/images/Dspeed4.png | Bin 24692 -> 0 bytes
 ceph/src/zstd/doc/images/cdict_v136.png | Bin 0 -> 33330 bytes
 ceph/src/zstd/doc/images/dict-cr.png | Bin 90047 -> 90412 bytes
 ceph/src/zstd/doc/images/dict-cs.png | Bin 93837 -> 91518 bytes
 ceph/src/zstd/doc/images/dict-ds.png | Bin 89590 -> 98316 bytes
 ceph/src/zstd/doc/images/ldmCspeed.png | Bin 72251 -> 0 bytes
 ceph/src/zstd/doc/images/ldmDspeed.png | Bin 27594 -> 0 bytes
 .../src/zstd/doc/images/zstd_cdict_v1_3_5.png | Bin 0 -> 93969 bytes
 ceph/src/zstd/doc/images/zstd_logo86.png | Bin 0 -> 5963 bytes
 ceph/src/zstd/doc/zstd_compression_format.md | 527 +-
 ceph/src/zstd/doc/zstd_manual.html | 2136 +++--
 ceph/src/zstd/examples/Makefile | 43 +-
 ceph/src/zstd/examples/README.md | 10 +-
 ceph/src/zstd/examples/common.h | 234 +
 .../zstd/examples/dictionary_compression.c | 90 +-
 .../zstd/examples/dictionary_decompression.c | 104 +-
 .../examples/multiple_simple_compression.c | 116 +
 .../examples/multiple_streaming_compression.c | 126 +-
 ceph/src/zstd/examples/simple_compression.c | 89 +-
 ceph/src/zstd/examples/simple_decompression.c | 87 +-
 .../src/zstd/examples/streaming_compression.c | 136 +-
 .../zstd/examples/streaming_decompression.c | 110 +-
 .../zstd/examples/streaming_memory_usage.c | 132 +-
 ceph/src/zstd/lib/BUCK | 70 +-
 ceph/src/zstd/lib/Makefile | 301 +-
 ceph/src/zstd/lib/README.md | 149 +-
 ceph/src/zstd/lib/common/bitstream.h | 127 +-
 ceph/src/zstd/lib/common/compiler.h | 113 +-
 ceph/src/zstd/lib/common/cpu.h | 215 +
 ceph/src/zstd/lib/common/debug.c | 24 +
 ceph/src/zstd/lib/common/debug.h | 114 +
 ceph/src/zstd/lib/common/entropy_common.c | 63 +-
 ceph/src/zstd/lib/common/error_private.c | 10 +-
 ceph/src/zstd/lib/common/error_private.h | 8 +-
 ceph/src/zstd/lib/common/fse.h | 136 +-
 ceph/src/zstd/lib/common/fse_decompress.c | 53 +-
 ceph/src/zstd/lib/common/huf.h | 320 +-
 ceph/src/zstd/lib/common/mem.h | 127 +-
 ceph/src/zstd/lib/common/pool.c | 207 +-
 ceph/src/zstd/lib/common/pool.h | 63 +-
 ceph/src/zstd/lib/common/threading.c | 58 +-
 ceph/src/zstd/lib/common/threading.h | 64 +-
 ceph/src/zstd/lib/common/xxhash.c | 63 +-
 ceph/src/zstd/lib/common/xxhash.h | 42 +-
 ceph/src/zstd/lib/common/zstd_common.c | 13 +-
 ceph/src/zstd/lib/common/zstd_errors.h | 27 +-
 ceph/src/zstd/lib/common/zstd_internal.h | 476 +-
 ceph/src/zstd/lib/compress/fse_compress.c | 347 +-
 ceph/src/zstd/lib/compress/hist.c | 183 +
 ceph/src/zstd/lib/compress/hist.h | 75 +
 ceph/src/zstd/lib/compress/huf_compress.c | 418 +-
 ceph/src/zstd/lib/compress/zstd_compress.c | 4619 ++++++----
 ceph/src/zstd/lib/compress/zstd_compress.h | 307 -
 .../lib/compress/zstd_compress_internal.h | 1125 +++
 .../lib/compress/zstd_compress_literals.c | 158 +
 .../lib/compress/zstd_compress_literals.h | 29 +
 .../lib/compress/zstd_compress_sequences.c | 419 +
 .../lib/compress/zstd_compress_sequences.h | 54 +
 .../lib/compress/zstd_compress_superblock.c | 845 ++
 .../lib/compress/zstd_compress_superblock.h | 32 +
 ceph/src/zstd/lib/compress/zstd_cwksp.h | 525 ++
 ceph/src/zstd/lib/compress/zstd_double_fast.c | 505 +-
 ceph/src/zstd/lib/compress/zstd_double_fast.h | 22 +-
 ceph/src/zstd/lib/compress/zstd_fast.c | 574 +-
 ceph/src/zstd/lib/compress/zstd_fast.h | 23 +-
 ceph/src/zstd/lib/compress/zstd_lazy.c | 1033 ++-
 ceph/src/zstd/lib/compress/zstd_lazy.h | 57 +-
 ceph/src/zstd/lib/compress/zstd_ldm.c | 776 +-
 ceph/src/zstd/lib/compress/zstd_ldm.h | 107 +-
 ceph/src/zstd/lib/compress/zstd_opt.c | 1791 ++--
 ceph/src/zstd/lib/compress/zstd_opt.h | 40 +-
 ceph/src/zstd/lib/compress/zstdmt_compress.c | 2066 +++--
 ceph/src/zstd/lib/compress/zstdmt_compress.h | 138 +-
 ceph/src/zstd/lib/decompress/huf_decompress.c | 914 +-
 ceph/src/zstd/lib/decompress/zstd_ddict.c | 244 +
 ceph/src/zstd/lib/decompress/zstd_ddict.h | 44 +
 .../src/zstd/lib/decompress/zstd_decompress.c | 2452 ++----
 .../lib/decompress/zstd_decompress_block.c | 1432 ++++
 .../lib/decompress/zstd_decompress_block.h | 59 +
 .../lib/decompress/zstd_decompress_internal.h | 189 +
 ceph/src/zstd/lib/deprecated/zbuff.h | 17 +-
 ceph/src/zstd/lib/deprecated/zbuff_common.c | 4 +-
 ceph/src/zstd/lib/deprecated/zbuff_compress.c | 3 +-
 .../zstd/lib/deprecated/zbuff_decompress.c | 2 +-
 ceph/src/zstd/lib/dictBuilder/cover.c | 497 +-
 ceph/src/zstd/lib/dictBuilder/cover.h | 157 +
 ceph/src/zstd/lib/dictBuilder/divsufsort.c | 6 +-
 ceph/src/zstd/lib/dictBuilder/fastcover.c | 757 ++
 ceph/src/zstd/lib/dictBuilder/zdict.c | 310 +-
 ceph/src/zstd/lib/dictBuilder/zdict.h | 236 +-
 ceph/src/zstd/lib/dll/example/Makefile | 3 +-
 .../zstd/lib/dll/example/build_package.bat | 5 +-
 ceph/src/zstd/lib/dll/libzstd.def | 88 -
 ceph/src/zstd/lib/legacy/zstd_legacy.h | 66 +-
 ceph/src/zstd/lib/legacy/zstd_v01.c | 267 +-
 ceph/src/zstd/lib/legacy/zstd_v01.h | 21 +-
 ceph/src/zstd/lib/legacy/zstd_v02.c | 160 +-
 ceph/src/zstd/lib/legacy/zstd_v02.h | 21 +-
 ceph/src/zstd/lib/legacy/zstd_v03.c | 161 +-
 ceph/src/zstd/lib/legacy/zstd_v03.h | 21 +-
 ceph/src/zstd/lib/legacy/zstd_v04.c | 349 +-
 ceph/src/zstd/lib/legacy/zstd_v04.h | 21 +-
 ceph/src/zstd/lib/legacy/zstd_v05.c | 385 +-
 ceph/src/zstd/lib/legacy/zstd_v05.h | 23 +-
 ceph/src/zstd/lib/legacy/zstd_v06.c | 184 +-
 ceph/src/zstd/lib/legacy/zstd_v06.h | 17 +-
 ceph/src/zstd/lib/legacy/zstd_v07.c | 213 +-
 ceph/src/zstd/lib/legacy/zstd_v07.h | 17 +-
 ceph/src/zstd/lib/libzstd.pc.in | 5 +-
 ceph/src/zstd/lib/zstd.h | 2564 ++++--
 ceph/src/zstd/programs/BUCK | 41 +-
 ceph/src/zstd/programs/Makefile | 223 +-
 ceph/src/zstd/programs/README.md | 200 +-
 ceph/src/zstd/programs/bench.c | 656 --
 ceph/src/zstd/programs/bench.h | 35 -
 ceph/src/zstd/programs/benchfn.c | 256 +
 ceph/src/zstd/programs/benchfn.h | 183 +
 ceph/src/zstd/programs/benchzstd.c | 888 ++
 ceph/src/zstd/programs/benchzstd.h | 212 +
 ceph/src/zstd/programs/datagen.c | 35 +-
 ceph/src/zstd/programs/datagen.h | 2 +-
 ceph/src/zstd/programs/dibio.c | 92 +-
 ceph/src/zstd/programs/dibio.h | 8 +-
 ceph/src/zstd/programs/fileio.c | 2599 ++--
 ceph/src/zstd/programs/fileio.h | 118 +-
 ceph/src/zstd/programs/platform.h | 113 +-
 ceph/src/zstd/programs/timefn.c | 169 +
 ceph/src/zstd/programs/timefn.h | 85 +
 ceph/src/zstd/programs/util.c | 938 ++
 ceph/src/zstd/programs/util.h | 790 +-
 .../zstd/programs/windres/generate_res.bat | 11 -
 ceph/src/zstd/programs/windres/verrsrc.h | 9 +
 ceph/src/zstd/programs/windres/zstd32.res | Bin 1044 -> 1044 bytes
 ceph/src/zstd/programs/windres/zstd64.res | Bin 1044 -> 1044 bytes
 ceph/src/zstd/programs/zstd.1 | 258 +-
 ceph/src/zstd/programs/zstd.1.md | 253 +-
 ceph/src/zstd/programs/zstdcli.c | 1364 ++-
 ceph/src/zstd/programs/zstdgrep | 148 +-
 ceph/src/zstd/programs/zstdgrep.1 | 23 +
 ceph/src/zstd/programs/zstdgrep.1.md | 26 +
 ceph/src/zstd/programs/zstdless.1 | 14 +
 ceph/src/zstd/programs/zstdless.1.md | 16 +
 ...speed.py => DEPRECATED-test-zstd-speed.py} | 4 +-
 ceph/src/zstd/tests/Makefile | 287 +-
 ceph/src/zstd/tests/README.md | 99 +-
 ceph/src/zstd/tests/automated_benchmarking.py | 326 +
 ceph/src/zstd/tests/bigdict.c | 128 +
 ceph/src/zstd/tests/checkTag.c | 65 +
 ceph/src/zstd/tests/datagencli.c | 4 +-
 ceph/src/zstd/tests/decodecorpus.c | 165 +-
 .../zstd/tests/dict-files/zero-weight-dict | Bin 0 -> 153 bytes
 ceph/src/zstd/tests/fullbench.c | 655 +-
 ceph/src/zstd/tests/fuzz/Makefile | 157 +-
 ceph/src/zstd/tests/fuzz/README.md | 25 +-
 ceph/src/zstd/tests/fuzz/block_decompress.c | 8 +-
 ceph/src/zstd/tests/fuzz/block_round_trip.c | 39 +-
 ceph/src/zstd/tests/fuzz/default.options | 2 -
 .../zstd/tests/fuzz/dictionary_decompress.c | 73 +
 ceph/src/zstd/tests/fuzz/dictionary_loader.c | 103 +
 .../zstd/tests/fuzz/dictionary_round_trip.c | 121 +
 .../tests/fuzz/dictionary_stream_round_trip.c | 206 +
 ceph/src/zstd/tests/fuzz/fuzz.h | 19 +-
 ceph/src/zstd/tests/fuzz/fuzz.py | 213 +-
 ceph/src/zstd/tests/fuzz/fuzz_data_producer.c | 84 +
 ceph/src/zstd/tests/fuzz/fuzz_data_producer.h | 61 +
 ceph/src/zstd/tests/fuzz/fuzz_helpers.c | 32 +
 ceph/src/zstd/tests/fuzz/fuzz_helpers.h | 38 +-
 .../tests/fuzz/raw_dictionary_round_trip.c | 115 +
 ceph/src/zstd/tests/fuzz/regression_driver.c | 58 +-
 ceph/src/zstd/tests/fuzz/simple_compress.c | 52 +
 ceph/src/zstd/tests/fuzz/simple_decompress.c | 30 +-
 ceph/src/zstd/tests/fuzz/simple_round_trip.c | 89 +-
 ceph/src/zstd/tests/fuzz/stream_decompress.c | 57 +-
 ceph/src/zstd/tests/fuzz/stream_round_trip.c | 75 +-
 ceph/src/zstd/tests/fuzz/zstd_frame_info.c | 43 +
 ceph/src/zstd/tests/fuzz/zstd_helpers.c | 145 +-
 ceph/src/zstd/tests/fuzz/zstd_helpers.h | 28 +-
 ceph/src/zstd/tests/fuzzer.c | 2635 +++-
 .../huffman-compressed-larger | Bin
 .../golden-decompression/rle-first-block.zst | Bin 0 -> 45 bytes
 ceph/src/zstd/tests/gzip/Makefile | 5 +-
 ceph/src/zstd/tests/invalidDictionaries.c | 2 +-
 ceph/src/zstd/tests/legacy.c | 83 +-
 ceph/src/zstd/tests/libzstd_partial_builds.sh | 89 +
 ceph/src/zstd/tests/longmatch.c | 20 +-
 ceph/src/zstd/tests/namespaceTest.c | 24 -
 ceph/src/zstd/tests/paramgrill.c | 3203 +++--
 ceph/src/zstd/tests/playTests.sh | 1471 +++-
 ceph/src/zstd/tests/poolTests.c | 223 +-
 ceph/src/zstd/tests/rateLimiter.py | 41 +
 ceph/src/zstd/tests/regression/Makefile | 59 +
 ceph/src/zstd/tests/regression/config.c | 278 +
 ceph/src/zstd/tests/regression/config.h | 86 +
 ceph/src/zstd/tests/regression/data.c | 613 ++
 ceph/src/zstd/tests/regression/data.h | 121 +
 ceph/src/zstd/tests/regression/levels.h | 44 +
 ceph/src/zstd/tests/regression/method.c | 688 ++
 ceph/src/zstd/tests/regression/method.h | 65 +
 ceph/src/zstd/tests/regression/result.c | 28 +
 ceph/src/zstd/tests/regression/result.h | 103 +
 ceph/src/zstd/tests/regression/results.csv | 636 ++
 ceph/src/zstd/tests/regression/test.c | 362 +
 ceph/src/zstd/tests/roundTripCrash.c | 14 +-
 ceph/src/zstd/tests/seqgen.c | 260 +
 ceph/src/zstd/tests/seqgen.h | 58 +
 ceph/src/zstd/tests/symbols.c | 162 -
 ceph/src/zstd/tests/test-license.py | 141 +
 ceph/src/zstd/tests/test-zstd-versions.py | 5 +-
 ceph/src/zstd/tests/zbufftest.c | 53 +-
 ceph/src/zstd/tests/zstreamtest.c | 1522 +++-
 ceph/src/zstd/zlibWrapper/BUCK | 2 +-
 ceph/src/zstd/zlibWrapper/Makefile | 59 +-
 ceph/src/zstd/zlibWrapper/README.md | 6 +-
 ceph/src/zstd/zlibWrapper/examples/fitblk.c | 16 +-
 .../zlibWrapper/examples/fitblk_original.c | 4 +-
 ceph/src/zstd/zlibWrapper/examples/minigzip.c | 18 +-
 .../zstd/zlibWrapper/examples/zwrapbench.c | 113 +-
 ceph/src/zstd/zlibWrapper/gzclose.c | 2 +-
 ceph/src/zstd/zlibWrapper/gzcompatibility.h | 2 +-
 ceph/src/zstd/zlibWrapper/gzguts.h | 6 +-
 ceph/src/zstd/zlibWrapper/gzlib.c | 22 +-
 ceph/src/zstd/zlibWrapper/gzread.c | 24 +-
 ceph/src/zstd/zlibWrapper/gzwrite.c | 25 +-
 ceph/src/zstd/zlibWrapper/zstd_zlibwrapper.c | 38 +-
 ceph/src/zstd/zlibWrapper/zstd_zlibwrapper.h | 2 +-
 934 files changed, 181812 insertions(+), 24420 deletions(-)
 create mode 100644 ceph/cmake/modules/BuildZstd.cmake
 create mode 100644 ceph/cmake/modules/FindZstd.cmake
 create mode 100644 ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json
 create mode 100644 ceph/monitoring/prometheus/alerts/test_alerts.yml
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/%
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/+
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/openstack.yaml
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/start.yaml
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/defaults.yaml
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/layering.yaml
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/.qa
 create mode 100644 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/.qa
 create mode 120000 ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml
 create mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py
 create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
 create mode 100644 ceph/src/mgr/DaemonKey.cc
 create mode 100644 ceph/src/mgr/DaemonKey.h
 delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/2.22b12bf9358f95829e55.js
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/2.44961b8d762dc0e4cac6.js
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.db2c4fdf5b9136aa0756.js
 delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.f054f1f260c137d64363.js
 delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.76846491986b0ea6c0bd.js
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.e3a7bd88bd1897000455.js
 rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{main.a755488a34fa64d1b79f.js => main.7b0f89a21c505f20dbd6.js} (79%)
 rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{runtime.ff444394af058f159c51.js => runtime.78a4cc7309d6fb4c4558.js} (86%)
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-site.service.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-site.service.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_rbd.py
 create mode 100755 ceph/src/rgw/rgw-gap-list
 create mode 100755 ceph/src/rgw/rgw-gap-list-comparator
 create mode 100644 ceph/src/spawn/.gitmodules
 create mode 100644 ceph/src/spawn/CMakeLists.txt
 create mode 100644 ceph/src/spawn/LICENSE_1_0.txt
 create mode 100644 ceph/src/spawn/README.md
 create mode 100644 ceph/src/spawn/include/spawn/detail/is_stack_allocator.hpp
 create mode 100644 ceph/src/spawn/include/spawn/detail/net.hpp
 create mode 100644 ceph/src/spawn/include/spawn/impl/spawn.hpp
 create mode 100644 ceph/src/spawn/include/spawn/spawn.hpp
 create mode 100644 ceph/src/spawn/test/CMakeLists.txt
 create mode 100644 ceph/src/spawn/test/dependency/CMakeLists.txt
 create mode 100644 ceph/src/spawn/test/dependency/googletest/.clang-format
 create mode 100644 ceph/src/spawn/test/dependency/googletest/.travis.yml
 create mode 100644 ceph/src/spawn/test/dependency/googletest/BUILD.bazel
 create mode 100644 ceph/src/spawn/test/dependency/googletest/CMakeLists.txt
 create mode 100644 ceph/src/spawn/test/dependency/googletest/CONTRIBUTING.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/LICENSE
 create mode 100644 ceph/src/spawn/test/dependency/googletest/README.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/WORKSPACE
 create mode 100644 ceph/src/spawn/test/dependency/googletest/appveyor.yml
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/build-linux-bazel.sh
 create mode 100644 ceph/src/spawn/test/dependency/googletest/ci/build-platformio.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/env-linux.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/env-osx.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/get-nprocessors.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/install-linux.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/install-osx.sh
 create mode 100644 ceph/src/spawn/test/dependency/googletest/ci/install-platformio.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/log-config.sh
 create mode 100755 ceph/src/spawn/test/dependency/googletest/ci/travis.sh
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/CMakeLists.txt
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/CONTRIBUTORS
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/LICENSE
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/README.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/cmake/gmock.pc.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/cmake/gmock_main.pc.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/docs/cheat_sheet.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/docs/cook_book.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/docs/for_dummies.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/docs/gmock_faq.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-actions.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-cardinalities.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-function-mocker.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-actions.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-actions.h.pump
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h.pump
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-matchers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-generated-matchers.h.pump
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-matchers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-more-actions.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-more-matchers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-nice-strict.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock-spec-builders.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/gmock.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/custom/README.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h.pump
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/custom/gmock-port.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/gmock-port.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/include/gmock/internal/gmock-pp.h
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/fuse_gmock_files.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/LICENSE
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/README
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/README.cppclean
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/__init__.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/ast.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/gmock_class.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/keywords.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/tokenize.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/cpp/utils.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/generator/gmock_gen.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/gmock-config.in
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/gmock_doctor.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/upload.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/scripts/upload_gmock.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock-all.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock-cardinalities.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock-internal-utils.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock-matchers.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock-spec-builders.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/src/gmock_main.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/BUILD.bazel
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-actions_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-cardinalities_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-function-mocker_nc.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-function-mocker_nc_test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-function-mocker_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-generated-actions_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-generated-function-mockers_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-generated-matchers_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-internal-utils_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-matchers_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-more-actions_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-nice-strict_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-port_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-pp-string_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-pp_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock-spec-builders_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_all_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_ex_test.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_leak_test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_leak_test_.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_link2_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_link_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_link_test.h
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_output_test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_output_test_.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_output_test_golden.txt
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_stress_test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_test.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googlemock/test/gmock_test_utils.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/CMakeLists.txt
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/CONTRIBUTORS
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/LICENSE
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/README.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/cmake/Config.cmake.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/cmake/gtest.pc.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/cmake/gtest_main.pc.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/cmake/internal_utils.cmake
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/cmake/libgtest.la.in
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/advanced.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/faq.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/pkgconfig.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/primer.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/pump_manual.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/docs/samples.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-death-test.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-matchers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-message.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-param-test.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-printers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-spi.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-test-part.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest-typed-test.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest_pred_impl.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/gtest_prod.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/custom/README.md
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/custom/gtest-port.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/custom/gtest-printers.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/custom/gtest.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-filepath.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-internal.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-param-util.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-port-arch.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-port.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-string.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-type-util.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/include/gtest/internal/gtest-type-util.h.pump
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/prime_tables.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample1.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample1.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample10_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample1_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample2.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample2.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample2_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample3-inl.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample3_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample4.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample4.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample4_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample5_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample6_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample7_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample8_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/samples/sample9_unittest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/scripts/common.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/fuse_gtest_files.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/gen_gtest_pred_impl.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/gtest-config.in
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/pump.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/release_docs.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/scripts/test/Makefile
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/upload.py
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/scripts/upload_gtest.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-all.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-death-test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-filepath.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-internal-inl.h
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-matchers.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-port.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-printers.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-test-part.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest-typed-test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/src/gtest_main.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/BUILD.bazel
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-break-on-failure-unittest.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-break-on-failure-unittest_.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-catch-exceptions-test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-catch-exceptions-test_.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-color-test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-color-test_.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-death-test-test.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-death-test_ex_test.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-env-var-test.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-env-var-test_.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-filepath-test.cc
 create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-filter-unittest.py
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-filter-unittest_.cc
 create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-json-outfiles-test.py
 create mode 100644
ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-json-output-unittest.py create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-list-tests-unittest.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-list-tests-unittest_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-listener-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-message-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-options-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-output-test-golden-lin.txt create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-output-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-output-test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-invalid-name1-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-invalid-name1-test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-invalid-name2-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-invalid-name2-test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test-test.h create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-param-test2-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-port-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-printers-test.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-shuffle-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-shuffle-test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-test-part-test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-test2_test.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-throw-on-failure-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-throw-on-failure-test_.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-uninitialized-test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/googletest-uninitialized-test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest-typed-test2_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest-typed-test_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest-typed-test_test.h create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest-unittest-api_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_all_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_assert_by_exception_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_environment_test.cc create 
mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_help_test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_help_test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_json_test_utils.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_list_output_unittest.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_list_output_unittest_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_main_unittest.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_no_test_unittest.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_pred_impl_unittest.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_premature_exit_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_prod_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_repeat_test.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_skip_environment_check_output_test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_skip_in_environment_setup_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_skip_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_sole_header_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_stress_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_test_macro_stack_footprint_test.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_test_utils.py create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_testbridge_test.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_testbridge_test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_throw_on_failure_ex_test.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_unittest.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_outfile1_test_.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_outfile2_test_.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_outfiles_test.py create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_output_unittest.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_output_unittest_.cc create mode 100755 ceph/src/spawn/test/dependency/googletest/googletest/test/gtest_xml_test_utils.py create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/production.cc create mode 100644 ceph/src/spawn/test/dependency/googletest/googletest/test/production.h create mode 100644 ceph/src/spawn/test/dependency/googletest/library.json create mode 100644 ceph/src/spawn/test/dependency/googletest/platformio.ini create mode 100644 ceph/src/spawn/test/test_spawn.cc create mode 100755 ceph/src/test/run-promtool-unittests.sh create mode 100644 ceph/src/zstd/.circleci/config.yml create mode 100644 ceph/src/zstd/.circleci/images/primary/Dockerfile create mode 100644 ceph/src/zstd/.cirrus.yml create mode 100644 
ceph/src/zstd/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 ceph/src/zstd/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 ceph/src/zstd/.github/workflows/main.yml rename ceph/src/zstd/{NEWS => CHANGELOG} (54%) create mode 100644 ceph/src/zstd/CODE_OF_CONDUCT.md create mode 100644 ceph/src/zstd/build/LICENSE create mode 100644 ceph/src/zstd/build/VS_scripts/build.VS2017.cmd create mode 100644 ceph/src/zstd/build/VS_scripts/build.VS2017Community.cmd create mode 100644 ceph/src/zstd/build/VS_scripts/build.VS2017Enterprise.cmd create mode 100644 ceph/src/zstd/build/VS_scripts/build.VS2017Professional.cmd create mode 100644 ceph/src/zstd/build/cmake/CMakeModules/FindLibLZ4.cmake create mode 100644 ceph/src/zstd/build/cmake/README.md create mode 100644 ceph/src/zstd/build/cmake/zstdConfig.cmake create mode 100644 ceph/src/zstd/build/meson/GetZstdLibraryVersion.py create mode 100644 ceph/src/zstd/build/meson/InstallSymlink.py create mode 100644 ceph/src/zstd/build/meson/README.md create mode 100644 ceph/src/zstd/build/meson/contrib/gen_html/meson.build create mode 100644 ceph/src/zstd/build/meson/contrib/meson.build create mode 100644 ceph/src/zstd/build/meson/contrib/pzstd/meson.build create mode 100644 ceph/src/zstd/build/meson/lib/meson.build create mode 100644 ceph/src/zstd/build/meson/meson.build create mode 100644 ceph/src/zstd/build/meson/meson_options.txt create mode 100644 ceph/src/zstd/build/meson/programs/meson.build create mode 100644 ceph/src/zstd/build/meson/tests/meson.build create mode 100644 ceph/src/zstd/build/meson/tests/valgrindTest.py delete mode 100644 ceph/src/zstd/circle.yml delete mode 100644 ceph/src/zstd/contrib/adaptive-compression/Makefile delete mode 100644 ceph/src/zstd/contrib/adaptive-compression/README.md delete mode 100644 ceph/src/zstd/contrib/adaptive-compression/adapt.c delete mode 100644 ceph/src/zstd/contrib/adaptive-compression/datagencli.c delete mode 100755 ceph/src/zstd/contrib/adaptive-compression/test-correctness.sh delete mode 100755 ceph/src/zstd/contrib/adaptive-compression/test-performance.sh create mode 100644 ceph/src/zstd/contrib/diagnose_corruption/Makefile create mode 100644 ceph/src/zstd/contrib/diagnose_corruption/check_flipped_bits.c create mode 100644 ceph/src/zstd/contrib/docker/Dockerfile create mode 100644 ceph/src/zstd/contrib/docker/README.md create mode 100644 ceph/src/zstd/contrib/largeNbDicts/Makefile create mode 100644 ceph/src/zstd/contrib/largeNbDicts/README.md create mode 100644 ceph/src/zstd/contrib/largeNbDicts/largeNbDicts.c delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/Makefile delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/README.md delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/ldm.c delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/ldm.h delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/ldm_common.c delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/ldm_params.h delete mode 100644 ceph/src/zstd/contrib/long_distance_matching/main.c create mode 100644 ceph/src/zstd/contrib/match_finders/README.md create mode 100644 ceph/src/zstd/contrib/match_finders/zstd_edist.c create mode 100644 ceph/src/zstd/contrib/match_finders/zstd_edist.h delete mode 100644 ceph/src/zstd/contrib/meson/README delete mode 100644 ceph/src/zstd/contrib/meson/meson.build delete mode 100644 ceph/src/zstd/contrib/meson/meson_options.txt create mode 100644 ceph/src/zstd/contrib/premake/premake4.lua create mode 100644 
ceph/src/zstd/contrib/premake/zstd.lua create mode 100644 ceph/src/zstd/contrib/seekable_format/examples/seekable_decompression_mem.c create mode 100644 ceph/src/zstd/contrib/single_file_libs/README.md create mode 100755 ceph/src/zstd/contrib/single_file_libs/build_decoder_test.sh create mode 100755 ceph/src/zstd/contrib/single_file_libs/build_library_test.sh create mode 100755 ceph/src/zstd/contrib/single_file_libs/combine.sh create mode 100755 ceph/src/zstd/contrib/single_file_libs/create_single_file_decoder.sh create mode 100755 ceph/src/zstd/contrib/single_file_libs/create_single_file_library.sh create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/README.md create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/emscripten.c create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/roundtrip.c create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/shell.html create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/simple.c create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/testcard-dxt1.inl create mode 100644 ceph/src/zstd/contrib/single_file_libs/examples/testcard-zstd.inl create mode 100755 ceph/src/zstd/contrib/single_file_libs/examples/testcard.png create mode 100644 ceph/src/zstd/contrib/single_file_libs/zstd-in.c create mode 100644 ceph/src/zstd/contrib/single_file_libs/zstddeclib-in.c create mode 100644 ceph/src/zstd/contrib/snap/snapcraft.yaml create mode 100644 ceph/src/zstd/doc/images/CSpeed2.png delete mode 100644 ceph/src/zstd/doc/images/Cspeed4.png create mode 100644 ceph/src/zstd/doc/images/DSpeed3.png delete mode 100644 ceph/src/zstd/doc/images/Dspeed4.png create mode 100644 ceph/src/zstd/doc/images/cdict_v136.png delete mode 100644 ceph/src/zstd/doc/images/ldmCspeed.png delete mode 100644 ceph/src/zstd/doc/images/ldmDspeed.png create mode 100644 ceph/src/zstd/doc/images/zstd_cdict_v1_3_5.png create mode 100644 ceph/src/zstd/doc/images/zstd_logo86.png create mode 100644 ceph/src/zstd/examples/common.h create mode 100644 ceph/src/zstd/examples/multiple_simple_compression.c create mode 100644 ceph/src/zstd/lib/common/cpu.h create mode 100644 ceph/src/zstd/lib/common/debug.c create mode 100644 ceph/src/zstd/lib/common/debug.h create mode 100644 ceph/src/zstd/lib/compress/hist.c create mode 100644 ceph/src/zstd/lib/compress/hist.h delete mode 100644 ceph/src/zstd/lib/compress/zstd_compress.h create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_internal.h create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_literals.c create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_literals.h create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_sequences.c create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_sequences.h create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_superblock.c create mode 100644 ceph/src/zstd/lib/compress/zstd_compress_superblock.h create mode 100644 ceph/src/zstd/lib/compress/zstd_cwksp.h create mode 100644 ceph/src/zstd/lib/decompress/zstd_ddict.c create mode 100644 ceph/src/zstd/lib/decompress/zstd_ddict.h create mode 100644 ceph/src/zstd/lib/decompress/zstd_decompress_block.c create mode 100644 ceph/src/zstd/lib/decompress/zstd_decompress_block.h create mode 100644 ceph/src/zstd/lib/decompress/zstd_decompress_internal.h create mode 100644 ceph/src/zstd/lib/dictBuilder/cover.h create mode 100644 ceph/src/zstd/lib/dictBuilder/fastcover.c delete mode 100644 ceph/src/zstd/lib/dll/libzstd.def delete mode 100644 ceph/src/zstd/programs/bench.c delete 
mode 100644 ceph/src/zstd/programs/bench.h create mode 100644 ceph/src/zstd/programs/benchfn.c create mode 100644 ceph/src/zstd/programs/benchfn.h create mode 100644 ceph/src/zstd/programs/benchzstd.c create mode 100644 ceph/src/zstd/programs/benchzstd.h create mode 100644 ceph/src/zstd/programs/timefn.c create mode 100644 ceph/src/zstd/programs/timefn.h create mode 100644 ceph/src/zstd/programs/util.c delete mode 100644 ceph/src/zstd/programs/windres/generate_res.bat create mode 100644 ceph/src/zstd/programs/zstdgrep.1 create mode 100644 ceph/src/zstd/programs/zstdgrep.1.md create mode 100644 ceph/src/zstd/programs/zstdless.1 create mode 100644 ceph/src/zstd/programs/zstdless.1.md rename ceph/src/zstd/tests/{test-zstd-speed.py => DEPRECATED-test-zstd-speed.py} (98%) create mode 100644 ceph/src/zstd/tests/automated_benchmarking.py create mode 100644 ceph/src/zstd/tests/bigdict.c create mode 100644 ceph/src/zstd/tests/checkTag.c create mode 100644 ceph/src/zstd/tests/dict-files/zero-weight-dict delete mode 100644 ceph/src/zstd/tests/fuzz/default.options create mode 100644 ceph/src/zstd/tests/fuzz/dictionary_decompress.c create mode 100644 ceph/src/zstd/tests/fuzz/dictionary_loader.c create mode 100644 ceph/src/zstd/tests/fuzz/dictionary_round_trip.c create mode 100644 ceph/src/zstd/tests/fuzz/dictionary_stream_round_trip.c create mode 100644 ceph/src/zstd/tests/fuzz/fuzz_data_producer.c create mode 100644 ceph/src/zstd/tests/fuzz/fuzz_data_producer.h create mode 100644 ceph/src/zstd/tests/fuzz/fuzz_helpers.c create mode 100644 ceph/src/zstd/tests/fuzz/raw_dictionary_round_trip.c create mode 100644 ceph/src/zstd/tests/fuzz/simple_compress.c create mode 100644 ceph/src/zstd/tests/fuzz/zstd_frame_info.c rename ceph/src/zstd/tests/{files => golden-compression}/huffman-compressed-larger (100%) create mode 100644 ceph/src/zstd/tests/golden-decompression/rle-first-block.zst create mode 100755 ceph/src/zstd/tests/libzstd_partial_builds.sh delete mode 100644 ceph/src/zstd/tests/namespaceTest.c create mode 100755 ceph/src/zstd/tests/rateLimiter.py create mode 100644 ceph/src/zstd/tests/regression/Makefile create mode 100644 ceph/src/zstd/tests/regression/config.c create mode 100644 ceph/src/zstd/tests/regression/config.h create mode 100644 ceph/src/zstd/tests/regression/data.c create mode 100644 ceph/src/zstd/tests/regression/data.h create mode 100644 ceph/src/zstd/tests/regression/levels.h create mode 100644 ceph/src/zstd/tests/regression/method.c create mode 100644 ceph/src/zstd/tests/regression/method.h create mode 100644 ceph/src/zstd/tests/regression/result.c create mode 100644 ceph/src/zstd/tests/regression/result.h create mode 100644 ceph/src/zstd/tests/regression/results.csv create mode 100644 ceph/src/zstd/tests/regression/test.c create mode 100644 ceph/src/zstd/tests/seqgen.c create mode 100644 ceph/src/zstd/tests/seqgen.h delete mode 100644 ceph/src/zstd/tests/symbols.c create mode 100755 ceph/src/zstd/tests/test-license.py diff --git a/ceph/.gitmodules b/ceph/.gitmodules index fc961d5fb..08ed451fd 100644 --- a/ceph/.gitmodules +++ b/ceph/.gitmodules @@ -58,3 +58,6 @@ [submodule "src/c-ares"] path = src/c-ares url = https://github.com/ceph/c-ares.git +[submodule "src/spawn"] + path = src/spawn + url = https://github.com/ceph/spawn.git diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 6afd9c3db..979ccc1b5 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.5.1) project(ceph CXX C ASM) -set(VERSION 14.2.20) +set(VERSION 
14.2.22) if(POLICY CMP0028) cmake_policy(SET CMP0028 NEW) @@ -123,6 +123,7 @@ cmake_pop_check_state() CHECK_FUNCTION_EXISTS(eventfd HAVE_EVENTFD) CHECK_FUNCTION_EXISTS(getprogname HAVE_GETPROGNAME) +CHECK_FUNCTION_EXISTS(gettid HAVE_GETTID) CHECK_INCLUDE_FILES("linux/types.h" HAVE_LINUX_TYPES_H) CHECK_INCLUDE_FILES("linux/version.h" HAVE_LINUX_VERSION_H) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index fff5c5d6d..8a1fdb575 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -1,6 +1,13 @@ -14.2.17 +14.2.19 ------- -* $pid expansion in config paths like `admin_socket` will now properly expand - to the daemon pid for commands like `ceph-mds` or `ceph-osd`. Previously only - `ceph-fuse`/`rbd-nbd` expanded `$pid` with the actual daemon pid. +* OSD: the option ``osd_fast_shutdown_notify_mon`` has been introduced to allow + the OSD to notify the monitor that it is shutting down even if ``osd_fast_shutdown`` + is enabled. This helps with the monitor logs on larger clusters, which may otherwise + get many 'osd.X reported immediately failed by osd.Y' messages that confuse tools. + +14.2.18 +------- + +* This release fixes issues loading the dashboard and volumes manager + modules in some environments. diff --git a/ceph/alpine/APKBUILD b/ceph/alpine/APKBUILD index 0677a4ce9..6315f3d1e 100644 --- a/ceph/alpine/APKBUILD +++ b/ceph/alpine/APKBUILD @@ -1,7 +1,7 @@ # Contributor: John Coyle # Maintainer: John Coyle pkgname=ceph -pkgver=14.2.20 +pkgver=14.2.22 pkgrel=0 pkgdesc="Ceph is a distributed object store and file system" pkgusers="ceph" @@ -64,7 +64,7 @@ makedepends=" xmlstarlet yasm " -source="ceph-14.2.20.tar.bz2" +source="ceph-14.2.22.tar.bz2" subpackages=" $pkgname-base $pkgname-common @@ -117,7 +117,7 @@ _sysconfdir=/etc _udevrulesdir=/etc/udev/rules.d _python_sitelib=/usr/lib/python2.7/site-packages -builddir=$srcdir/ceph-14.2.20 +builddir=$srcdir/ceph-14.2.22 build() { export CEPH_BUILD_VIRTUALENV=$builddir diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 1a7e215eb..27422726c 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -23,7 +23,7 @@ ################################################################################# %bcond_with make_check %bcond_without ceph_test_package -%ifarch s390 s390x +%ifarch s390 %bcond_with tcmalloc %else %bcond_without tcmalloc @@ -109,7 +109,7 @@ # main package definition ################################################################################# Name: ceph -Version: 14.2.20 +Version: 14.2.22 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -125,7 +125,7 @@ License: LGPL-2.1 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-14.2.20.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-14.2.22.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -293,6 +293,7 @@ BuildRequires: pyOpenSSL%{_python_buildid} %else BuildRequires: python%{_python_buildid}-pyOpenSSL %endif +BuildRequires: golang-github-prometheus BuildRequires: libtool-ltdl-devel BuildRequires: python%{_python_buildid}-cherrypy BuildRequires: python%{_python_buildid}-jwt @@ -306,6 +307,7 @@ BuildRequires: xmlsec1-openssl BuildRequires: xmlsec1-openssl-devel %endif %if 0%{?suse_version} +BuildRequires: golang-github-prometheus-prometheus BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -659,6 +661,9 @@ Requires: librgw2 = 
%{_epoch_prefix}%{version}-%{release} %if 0%{?rhel} || 0%{?fedora} Requires: mailcap %endif +%if 0%{?weak_deps} +Recommends: gawk +%endif %description radosgw RADOS is a distributed object store used by the Ceph distributed storage system. This package provides a REST gateway to the @@ -1142,7 +1147,7 @@ This package provides Ceph’s default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-14.2.20 +%autosetup -p1 -n ceph-14.2.22 %build # LTO can be enabled as soon as the following GCC bug is fixed: @@ -1870,6 +1875,8 @@ fi %{_bindir}/radosgw-token %{_bindir}/radosgw-es %{_bindir}/radosgw-object-expirer +%{_bindir}/rgw-gap-list +%{_bindir}/rgw-gap-list-comparator %{_bindir}/rgw-orphan-list %{_mandir}/man8/radosgw.8* %dir %{_localstatedir}/lib/ceph/radosgw diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index 7c71ea026..0c94ee897 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -23,7 +23,7 @@ ################################################################################# %bcond_with make_check %bcond_without ceph_test_package -%ifarch s390 s390x +%ifarch s390 %bcond_with tcmalloc %else %bcond_without tcmalloc @@ -293,6 +293,7 @@ BuildRequires: pyOpenSSL%{_python_buildid} %else BuildRequires: python%{_python_buildid}-pyOpenSSL %endif +BuildRequires: golang-github-prometheus BuildRequires: libtool-ltdl-devel BuildRequires: python%{_python_buildid}-cherrypy BuildRequires: python%{_python_buildid}-jwt @@ -306,6 +307,7 @@ BuildRequires: xmlsec1-openssl BuildRequires: xmlsec1-openssl-devel %endif %if 0%{?suse_version} +BuildRequires: golang-github-prometheus-prometheus BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -659,6 +661,9 @@ Requires: librgw2 = %{_epoch_prefix}%{version}-%{release} %if 0%{?rhel} || 0%{?fedora} Requires: mailcap %endif +%if 0%{?weak_deps} +Recommends: gawk +%endif %description radosgw RADOS is a distributed object store used by the Ceph distributed storage system. 
This package provides a REST gateway to the @@ -1870,6 +1875,8 @@ fi %{_bindir}/radosgw-token %{_bindir}/radosgw-es %{_bindir}/radosgw-object-expirer +%{_bindir}/rgw-gap-list +%{_bindir}/rgw-gap-list-comparator %{_bindir}/rgw-orphan-list %{_mandir}/man8/radosgw.8* %dir %{_localstatedir}/lib/ceph/radosgw diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index 0c0cdd3a8..87503e2eb 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,19 @@ -ceph (14.2.20-1xenial) xenial; urgency=medium +ceph (14.2.22-1xenial) xenial; urgency=medium - -- Jenkins Build Slave User Mon, 19 Apr 2021 10:22:46 -0400 + -- Jenkins Build Slave User Tue, 29 Jun 2021 22:18:42 +0000 + +ceph (14.2.22-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Tue, 29 Jun 2021 22:09:07 +0000 + +ceph (14.2.21-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Thu, 13 May 2021 17:23:05 +0000 ceph (14.2.20-1) stable; urgency=medium diff --git a/ceph/cmake/modules/BuildBoost.cmake b/ceph/cmake/modules/BuildBoost.cmake index f99d74a0b..eaab33141 100644 --- a/ceph/cmake/modules/BuildBoost.cmake +++ b/ceph/cmake/modules/BuildBoost.cmake @@ -230,6 +230,7 @@ macro(build_boost version) INTERFACE_LINK_LIBRARIES "${dependencies}") unset(dependencies) endif() + set(Boost_${c}_FOUND "TRUE") endforeach() # for header-only libraries diff --git a/ceph/cmake/modules/BuildZstd.cmake b/ceph/cmake/modules/BuildZstd.cmake new file mode 100644 index 000000000..799b14b28 --- /dev/null +++ b/ceph/cmake/modules/BuildZstd.cmake @@ -0,0 +1,22 @@ +# libzstd - build it statically +function(build_Zstd) + set(ZSTD_C_FLAGS "-fPIC -Wno-unused-variable -O3") + + include(ExternalProject) + ExternalProject_Add(zstd_ext + SOURCE_DIR ${CMAKE_SOURCE_DIR}/src/zstd/build/cmake + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_C_FLAGS=${ZSTD_C_FLAGS} + -DCMAKE_AR=${CMAKE_AR} + -DCMAKE_POSITION_INDEPENDENT_CODE=${ENABLE_SHARED} + BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/libzstd + BUILD_COMMAND ${CMAKE_COMMAND} --build <BINARY_DIR> --target libzstd_static + BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/libzstd/lib/libzstd.a" + INSTALL_COMMAND "") + add_library(Zstd::Zstd STATIC IMPORTED) + set_target_properties(Zstd::Zstd PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_SOURCE_DIR}/src/zstd/lib" + IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/libzstd/lib/libzstd.a") + add_dependencies(Zstd::Zstd zstd_ext) +endfunction() diff --git a/ceph/cmake/modules/FindZstd.cmake b/ceph/cmake/modules/FindZstd.cmake new file mode 100644 index 000000000..44d2dc3d8 --- /dev/null +++ b/ceph/cmake/modules/FindZstd.cmake @@ -0,0 +1,51 @@ +# Try to find libzstd +# +# Once done, this will define +# +# Zstd_FOUND +# Zstd_INCLUDE_DIRS +# Zstd_LIBRARIES +# Zstd_VERSION_STRING +# Zstd_VERSION_MAJOR +# Zstd_VERSION_MINOR +# Zstd_VERSION_RELEASE + +find_path(Zstd_INCLUDE_DIR + NAMES zstd.h + HINTS ${Zstd_ROOT_DIR}/include) + +if(Zstd_INCLUDE_DIR AND EXISTS "${Zstd_INCLUDE_DIR}/zstd.h") + foreach(ver "MAJOR" "MINOR" "RELEASE") + file(STRINGS "${Zstd_INCLUDE_DIR}/zstd.h" Zstd_VER_${ver}_LINE + REGEX "^#define[ \t]+ZSTD_VERSION_${ver}[ \t]+[0-9]+$") + string(REGEX REPLACE "^#define[ \t]+ZSTD_VERSION_${ver}[ \t]+([0-9]+)$" + "\\1" Zstd_VERSION_${ver} "${Zstd_VER_${ver}_LINE}") + unset(${Zstd_VER_${ver}_LINE}) + endforeach() + set(Zstd_VERSION_STRING + "${Zstd_VERSION_MAJOR}.${Zstd_VERSION_MINOR}.${Zstd_VERSION_RELEASE}") +endif() + +find_library(Zstd_LIBRARY + NAMES
"${CMAKE_STATIC_LIBRARY_PREFIX}zstd.${CMAKE_STATIC_LIBRARY_SUFFIX}" zstd + HINTS ${Zstd_ROOT_DIR}/lib) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Zstd + REQUIRED_VARS Zstd_LIBRARY Zstd_INCLUDE_DIR + VERSION_VAR Zstd_VERSION_STRING) + +mark_as_advanced( + Zstd_LIBRARY + Zstd_INCLUDE_DIR) + +if(Zstd_FOUND AND NOT (TARGET Zstd::Zstd)) + set(Zstd_INCLUDE_DIRS ${Zstd_INCLUDE_DIR}) + set(Zstd_LIBRARIES ${Zstd_LIBRARY}) + add_library (Zstd::Zstd UNKNOWN IMPORTED) + set_target_properties(Zstd::Zstd PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${Zstd_INCLUDE_DIR} + IMPORTED_LINK_INTERFACE_LANGUAGES "C" + IMPORTED_LOCATION ${Zstd_LIBRARY} + VERSION "${Zstd_VERSION_STRING}") +endif() diff --git a/ceph/debian/ceph-common.postinst b/ceph/debian/ceph-common.postinst index b9d382aaa..0abcf4f82 100644 --- a/ceph/debian/ceph-common.postinst +++ b/ceph/debian/ceph-common.postinst @@ -78,7 +78,9 @@ case "$1" in fi if ! dpkg-statoverride --list /var/log/ceph >/dev/null then - chown -R $SERVER_USER:$SERVER_GROUP /var/log/ceph + # take care not to touch cephadm log subdirs + chown $SERVER_USER:$SERVER_GROUP /var/log/ceph + chown $SERVER_USER:$SERVER_GROUP /var/log/ceph/*.log* || true # members of group ceph can log here, but cannot remove # others' files. non-members cannot read any logs. chmod u=rwx,g=rwxs,o=t /var/log/ceph diff --git a/ceph/debian/control b/ceph/debian/control index fdcfee29c..ecbd0dcb9 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -51,30 +51,32 @@ Build-Depends: cmake (>= 3.5), libxml2-dev, librabbitmq-dev, librdkafka-dev, -# Make-Check libxmlsec1 -# Make-Check libxmlsec1-nss -# Make-Check libxmlsec1-openssl -# Make-Check libxmlsec1-dev +# Make-Check libzstd-dev, +# Make-Check libxmlsec1, +# Make-Check libxmlsec1-nss, +# Make-Check libxmlsec1-openssl, +# Make-Check libxmlsec1-dev, lsb-release, parted, patch, pkg-config, python (>= 2.7), python-all-dev, - python-cherrypy3, -# Make-Check python-jwt, -# Make-Check python-nose, -# Make-Check python-pecan, -# Make-Check python-bcrypt, -# Make-Check python-six, -# Make-Check python-tox, -# Make-Check python-coverage, -# Make-Check python-openssl, -# Make-Check python-prettytable, -# Make-Check python-requests, + python-cherrypy3 | python3-cherrypy3, +# Make-Check prometheus, +# Make-Check python3-jwt, +# Make-Check python3-nose, +# Make-Check python3-pecan, +# Make-Check python3-bcrypt, +# Make-Check python3-six, +# Make-Check tox, +# Make-Check python3-coverage, +# Make-Check python3-openssl, +# Make-Check python3-prettytable, +# Make-Check python3-requests, python-setuptools, - python-sphinx, -# Make-Check python-werkzeug, + python3-sphinx, +# Make-Check python3-werkzeug, python3-all-dev, python3-setuptools, # Make-Check socat, @@ -827,6 +829,7 @@ Depends: ceph-common (= ${binary:Version}), mime-support, ${misc:Depends}, ${shlibs:Depends}, +Suggests: gawk, Recommends: ntp | time-daemon, Description: REST gateway for RADOS distributed object store RADOS is a distributed object store used by the Ceph distributed diff --git a/ceph/debian/radosgw.install b/ceph/debian/radosgw.install index 9bb764937..40940c0e1 100644 --- a/ceph/debian/radosgw.install +++ b/ceph/debian/radosgw.install @@ -4,6 +4,8 @@ usr/bin/radosgw usr/bin/radosgw-es usr/bin/radosgw-object-expirer usr/bin/radosgw-token +usr/bin/rgw-gap-list +usr/bin/rgw-gap-list-comparator usr/bin/rgw-orphan-list usr/share/man/man8/ceph-diff-sorted.8 usr/share/man/man8/radosgw.8 diff --git a/ceph/do_cmake.sh b/ceph/do_cmake.sh index 
ab8f5c8c9..ac60a5e88 100755 --- a/ceph/do_cmake.sh +++ b/ceph/do_cmake.sh @@ -6,7 +6,7 @@ if test -e build; then exit 1 fi -PYBUILD="2" +PYBUILD="3" if [ -r /etc/os-release ]; then source /etc/os-release case "$ID" in diff --git a/ceph/doc/man/8/ceph-volume.rst b/ceph/doc/man/8/ceph-volume.rst index 724657a9d..055571d68 100644 --- a/ceph/doc/man/8/ceph-volume.rst +++ b/ceph/doc/man/8/ceph-volume.rst @@ -15,7 +15,7 @@ Synopsis | **ceph-volume** **inventory** | **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare* -| *zap* | *list* | *batch*] +| *zap* | *list* | *batch* | *new-wal* | *new-db* | *migrate* ] | **ceph-volume** **simple** [ *trigger* | *scan* | *activate* ] @@ -243,6 +243,71 @@ Positional arguments: ``/path/to/sda1`` or ``/path/to/sda`` for regular devices. +**new-wal** +Attaches the given logical volume to the OSD as a WAL. The logical volume +name format is vg/lv. Fails if the OSD already has a WAL attached. + +Usage:: + + ceph-volume lvm new-wal --osd-id OSD_ID --osd-fsid OSD_FSID --target TARGET_LV + +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to attach new WAL to +* --osd-fsid OSD_FSID OSD fsid to attach new WAL to +* --target TARGET_LV logical volume name to attach as WAL + + +**new-db** +Attaches the given logical volume to the OSD as a DB. The logical volume +name format is vg/lv. Fails if the OSD already has a DB attached. + +Usage:: + + ceph-volume lvm new-db --osd-id OSD_ID --osd-fsid OSD_FSID --target TARGET_LV + +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to attach new DB to +* --osd-fsid OSD_FSID OSD fsid to attach new DB to +* --target TARGET_LV logical volume name to attach as DB + +**migrate** + +Moves BlueFS data from the source volume(s) to the target one; source volumes +(except the main one, i.e. the data or block volume) are removed on success. Only +LVM volumes are permitted as the target, either one that is already attached or a +new one. In the latter case it is attached to the OSD, replacing one of the source +devices. The following replacement rules apply (in order of precedence; stop at the +first match): + + - if the source list has a DB volume, the target device replaces it. + - if the source list has a WAL volume, the target device replaces it. + - if the source list has only the slow volume, the operation is not permitted + and requires explicit allocation via the new-db/new-wal commands. + +Usage:: + + ceph-volume lvm migrate --osd-id OSD_ID --osd-fsid OSD_FSID --target TARGET_LV --from {data|db|wal} [{data|db|wal} ...] + +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to perform the migration on +* --osd-fsid OSD_FSID OSD fsid to perform the migration on +* --target TARGET_LV logical volume to move data to +* --from TYPE_LIST list of source device type names, e.g. --from db wal + simple ------
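For example, a minimal sketch of the intended new-db/migrate workflow (the OSD id, fsid, and volume group/LV names here are hypothetical, and the OSD is assumed to be offline while its volumes are manipulated)::

    # Attach a fresh DB volume to OSD 1, then fold the existing WAL and DB onto it.
    ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EC9633F --target vg_nvme/lv_new_db
    ceph-volume lvm migrate --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EC9633F --target vg_nvme/lv_new_db --from db wal

Because ``db`` appears in the ``--from`` list, the target replaces the DB volume under the first replacement rule above.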
diff --git a/ceph/doc/man/8/ceph.rst b/ceph/doc/man/8/ceph.rst index 847abb7a7..7a1352943 100644 --- a/ceph/doc/man/8/ceph.rst +++ b/ceph/doc/man/8/ceph.rst @@ -904,11 +904,16 @@ data should remain readable and writeable, although data redundancy may be reduced as some PGs may end up in a degraded (but active) state. It will return a success code if it is okay to stop the OSD(s), or an error code and informative message if it is not or if no -conclusion can be drawn at the current time. +conclusion can be drawn at the current time. When ``--max <num>`` is +provided, up to <num> OSD IDs will be returned (including the provided +OSDs) that can all be stopped simultaneously. This allows larger sets +of stoppable OSDs to be generated easily by providing a single +starting OSD and a max. Additional OSDs are drawn from adjacent locations +in the CRUSH hierarchy. Usage:: - ceph osd ok-to-stop <id> [<ids>...] + ceph osd ok-to-stop <id> [<ids>...] [--max <num>] Subcommand ``pause`` pauses osd. diff --git a/ceph/doc/rados/configuration/mon-config-ref.rst b/ceph/doc/rados/configuration/mon-config-ref.rst index 727734d0f..da6743cd3 100644 --- a/ceph/doc/rados/configuration/mon-config-ref.rst +++ b/ceph/doc/rados/configuration/mon-config-ref.rst @@ -791,6 +791,14 @@ Trimming requires that the placement groups are ``active + clean``. :Default: 500 +``paxos service trim max multiplier`` + +:Description: The factor by which paxos service trim max will be multiplied + to get a new upper bound when trim sizes are high (0 disables it) +:Type: Integer +:Default: ``20`` + + ``mon max log epochs`` :Description: The maximum amount of log epochs to trim during a single proposal diff --git a/ceph/doc/rados/configuration/msgr2.rst b/ceph/doc/rados/configuration/msgr2.rst index cea412e73..6470abe68 100644 --- a/ceph/doc/rados/configuration/msgr2.rst +++ b/ceph/doc/rados/configuration/msgr2.rst @@ -88,7 +88,7 @@ Similarly, two options control whether IPv4 and IPv6 addresses are used: * ``ms_bind_ipv6`` [default: false] controls whether a daemon binds to an IPv6 address -.. note: The ability to bind to multiple ports has paved the way for +.. note:: The ability to bind to multiple ports has paved the way for dual-stack IPv4 and IPv6 support. That said, dual-stack support is not yet tested as of Nautilus v14.2.0 and likely needs some additional code changes to work correctly. diff --git a/ceph/doc/rados/configuration/network-config-ref.rst b/ceph/doc/rados/configuration/network-config-ref.rst index 5a719e3cc..c027ad40d 100644 --- a/ceph/doc/rados/configuration/network-config-ref.rst +++ b/ceph/doc/rados/configuration/network-config-ref.rst @@ -201,6 +201,27 @@ following option to the ``[global]`` section of your Ceph configuration file. We prefer that the cluster network is **NOT** reachable from the public network or the Internet for added security. +IPv4/IPv6 Dual Stack Mode +------------------------- + +If you want to run in an IPv4/IPv6 dual stack mode and want to define your public and/or +cluster networks, then you need to specify both your IPv4 and IPv6 networks for each: + +.. code-block:: ini + + [global] + # ... elided configuration + public network = {IPv4 public-network/netmask}, {IPv6 public-network/netmask} + +This is so that Ceph can find a valid IP address for both address families. + +If you want an IPv4-only or IPv6-only stack environment, then make sure you set the `ms bind` +options correctly. + +.. note:: + Binding to IPv4 is enabled by default, so if you just add the option to bind to IPv6 + you'll actually put yourself into dual stack mode. If you want just IPv6, then disable IPv4 and + enable IPv6. See `Bind`_ below. Ceph Daemons ============ @@ -336,11 +357,16 @@ addresses. :Default: ``7300`` :Required: No. +``ms bind ipv4`` + +:Description: Enables Ceph daemons to bind to IPv4 addresses. +:Type: Boolean +:Default: ``true`` +:Required: No ``ms bind ipv6`` -:Description: Enables Ceph daemons to bind to IPv6 addresses. Currently the - messenger *either* uses IPv4 or IPv6, but it cannot do both. +:Description: Enables Ceph daemons to bind to IPv6 addresses. :Type: Boolean :Default: ``false`` :Required: No
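To make the bind options above concrete, a small sketch using the centralized config store (the subnets are made-up examples; the option names are the ones documented in this hunk)::

    # IPv6-only cluster: IPv4 binding is on by default, so disable it explicitly.
    ceph config set global ms_bind_ipv4 false
    ceph config set global ms_bind_ipv6 true

    # Dual stack instead: leave both bind options enabled and list one network per family.
    ceph config set global public_network "10.0.0.0/24, 2001:db8::/64"

Enabling ``ms_bind_ipv6`` without also disabling ``ms_bind_ipv4`` results in dual-stack behaviour, which is exactly the pitfall the note above warns about.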
diff --git a/ceph/install-deps.sh b/ceph/install-deps.sh index 4f64ee176..756316689 100755 --- a/ceph/install-deps.sh +++ b/ceph/install-deps.sh @@ -147,13 +147,21 @@ function install_pkg_on_ubuntu { } function install_boost_on_ubuntu { - local codename=$1 - if dpkg -s ceph-libboost1.67-dev &> /dev/null; then - $SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove 'ceph-libboost.*1.67.*' - $SUDO rm /etc/apt/sources.list.d/ceph-libboost1.67.list - fi - local project=libboost local ver=1.72 + local installed_ver=$(apt -qq list --installed ceph-libboost*-dev 2>/dev/null | + grep -e 'libboost[0-9].[0-9]\+-dev' | + cut -d' ' -f2 | + cut -d'.' -f1,2) + if test -n "$installed_ver"; then + if echo "$installed_ver" | grep -q "^$ver"; then + return + else + $SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove "ceph-libboost.*${installed_ver}.*" + $SUDO rm -f /etc/apt/sources.list.d/ceph-libboost${installed_ver}.list + fi + fi + local codename=$1 + local project=libboost local sha1=1d7c7a00cc3f37e340bae0360191a757b44ec80c install_pkg_on_ubuntu \ $project \ diff --git a/ceph/make-dist b/ceph/make-dist index b5dc4f22b..aed683178 100755 --- a/ceph/make-dist +++ b/ceph/make-dist @@ -1,7 +1,21 @@ #!/bin/sh -e +SCRIPTNAME="$(basename "${0}")" +BASEDIR="$(readlink -f "$(dirname "${0}")")" + if [ ! -d .git ]; then - echo "no .git present. run this from the base dir of the git checkout." + echo "$SCRIPTNAME: Full path to the script: $BASEDIR/$SCRIPTNAME" + echo "$SCRIPTNAME: No .git present. Run this from the base dir of the git checkout." + exit 1 +fi + +# Running the script from a directory containing a colon anywhere in the path +# will expose us to the dreaded "[BUG] npm run [command] failed if the directory +# path contains colon" bug https://github.com/npm/cli/issues/633 +# (see https://tracker.ceph.com/issues/39556 for details) +if [[ "$BASEDIR" == *:* ]] ; then + echo "$SCRIPTNAME: Full path to the script: $BASEDIR/$SCRIPTNAME" + echo "$SCRIPTNAME: The path to the script contains a colon. Its presence has been known to break the script."
exit 1 fi @@ -67,7 +81,7 @@ build_dashboard_frontend() { $CURR_DIR/src/tools/setup-virtualenv.sh $TEMP_DIR $TEMP_DIR/bin/pip install nodeenv - $TEMP_DIR/bin/nodeenv -p --node=10.13.0 + $TEMP_DIR/bin/nodeenv --verbose -p --node=10.13.0 cd src/pybind/mgr/dashboard/frontend DEFAULT_LANG=`jq -r .config.locale package.json` diff --git a/ceph/monitoring/grafana/dashboards/host-details.json b/ceph/monitoring/grafana/dashboards/host-details.json index d02c40694..7bd319da7 100644 --- a/ceph/monitoring/grafana/dashboards/host-details.json +++ b/ceph/monitoring/grafana/dashboards/host-details.json @@ -37,7 +37,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1557386759572, + "iteration": 1615564911000, "links": [], "panels": [ { @@ -182,7 +182,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts).*\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts).*\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts).*\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts).*\"}[1m]))\n) * 100", + "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{mode}}", @@ -283,14 +283,14 @@ "steppedLine": false, "targets": [ { - "expr": "(node_memory_MemTotal{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]].*\"})- (\n (node_memory_MemFree{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n (node_memory_Cached{instance=~\"[[ceph_hosts]].*\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n (node_memory_Buffers{instance=~\"[[ceph_hosts]].*\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]].*\"}) +\n (node_memory_Slab{instance=~\"[[ceph_hosts]].*\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]].*\"})\n )\n \n", + "expr": "(node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})\n )\n \n", "format": "time_series", "intervalFactor": 1, "legendFormat": "used", "refId": "D" }, { - "expr": "node_memory_MemFree{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]].*\"} ", + "expr": "node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", "format": "time_series", "hide": false, 
"intervalFactor": 1, @@ -298,7 +298,7 @@ "refId": "A" }, { - "expr": "(node_memory_Cached{instance=~\"[[ceph_hosts]].*\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n(node_memory_Buffers{instance=~\"[[ceph_hosts]].*\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]].*\"}) +\n(node_memory_Slab{instance=~\"[[ceph_hosts]].*\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]].*\"}) \n", + "expr": "(node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) \n", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -306,7 +306,7 @@ "refId": "C" }, { - "expr": "node_memory_MemTotal{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]].*\"} ", + "expr": "node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -401,7 +401,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", @@ -410,7 +410,7 @@ "textEditor": true }, { - "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -501,7 +501,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(node_network_receive_drop{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_receive_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -509,7 +509,7 @@ "refId": "A" }, { - "expr": "irate(node_network_transmit_drop{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_transmit_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -621,7 +621,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts).*\"})", + "expr": 
"sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", "format": "time_series", "intervalFactor": 2, "refId": "A", @@ -685,7 +685,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(node_network_receive_errs{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_receive_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -693,7 +693,7 @@ "refId": "A" }, { - "expr": "irate(node_network_transmit_errs{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_transmit_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -798,7 +798,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts).*\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts).*\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) writes", @@ -807,7 +807,7 @@ "textEditor": true }, { - "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts).*\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -899,14 +899,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", 
\"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) write", "refId": "B" }, { - "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) read", @@ -992,7 +992,7 @@ "steppedLine": false, "targets": [ { - "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance,device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1083,7 +1083,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts).*\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts).*\"}[5m]) * 100), \"instance\", \"$1\", 
\"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "hide": false, "intervalFactor": 1, diff --git a/ceph/monitoring/grafana/dashboards/hosts-overview.json b/ceph/monitoring/grafana/dashboards/hosts-overview.json index 804aa51cc..b179d5717 100644 --- a/ceph/monitoring/grafana/dashboards/hosts-overview.json +++ b/ceph/monitoring/grafana/dashboards/hosts-overview.json @@ -131,7 +131,6 @@ "#d44a3a" ], "datasource": "$datasource", - "decimals": 0, "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", "decimals": 2, "format": "percentunit", @@ -215,7 +214,6 @@ "#d44a3a" ], "datasource": "$datasource", - "decimals": 0, "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", "decimals": 2, "format": "percentunit", @@ -433,7 +431,7 @@ "tableColumn": "", "targets": [ { - "expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", + "expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", "format": "time_series", "instant": true, "intervalFactor": 1, diff --git a/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json b/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json new file mode 100644 index 000000000..e9136d78e --- /dev/null +++ b/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json @@ -0,0 +1,440 @@ +{ + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1534386107523, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": 
false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Replication (throughput) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "unit": "bytes", + "format": "Bps", + "decimals": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7.4, + "x": 8.3, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Replication (objects) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "decimals": null, + "label": "Objects/s", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum[30s]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Polling Request Latency from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", 
+ "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "unit": "s", + "format": "ms", + "decimals": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Unsuccessful Object Replications from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "decimals": null, + "label": "Count/s", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_servers", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Sync Overview", + "uid": "rgw-sync-overview", + "version": 2 +} diff --git a/ceph/monitoring/prometheus/alerts/test_alerts.yml b/ceph/monitoring/prometheus/alerts/test_alerts.yml new file mode 100644 index 000000000..fe450e343 --- /dev/null +++ b/ceph/monitoring/prometheus/alerts/test_alerts.yml @@ -0,0 +1,769 @@ +rule_files: + - ceph_default_alerts.yml +evaluation_interval: 5m +tests: + # health error + - interval: 5m + input_series: + - series: 'ceph_health_status{instance="ceph:9283",job="ceph"}' + values: '2 2 2 2 2 2 2' + promql_expr_test: + - expr: ceph_health_status == 2 + eval_time: 5m + exp_samples: + - labels: 'ceph_health_status{instance="ceph:9283",job="ceph"}' + value: 2 + alert_rule_test: + - eval_time: 1m + alertname: 
health error + - eval_time: 6m + alertname: health error + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + type: ceph_default + severity: critical + exp_annotations: + description: > + Ceph in HEALTH_ERROR state for more than 5 minutes. + Please check "ceph health detail" for more information. + + # health warning + - interval: 5m + input_series: + - series: 'ceph_health_status{instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1 1 1 1 1' + promql_expr_test: + - expr: ceph_health_status == 1 + eval_time: 15m + exp_samples: + - labels: 'ceph_health_status{instance="ceph:9283",job="ceph"}' + value: 1 + alert_rule_test: + - eval_time: 10m + alertname: health warn + - eval_time: 20m + alertname: health warn + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + type: ceph_default + severity: warning + exp_annotations: + description: > + Ceph has been in HEALTH_WARN for more than 15 minutes. + Please check "ceph health detail" for more information. + + # low monitor quorum count + - interval: 1m + input_series: + - series: 'ceph_mon_quorum_status{ceph_daemon="mon.a",instance="ceph:9283", + job="ceph"}' + values: '1 1 1 1 1' + - series: 'ceph_mon_quorum_status{ceph_daemon="mon.b",instance="ceph:9283", + job="ceph"}' + values: '1 1 1 1 1' + - series: 'ceph_mon_quorum_status{ceph_daemon="mon.c",instance="ceph:9283", + job="ceph"}' + values: '0 0 0 0 0' + - series: 'ceph_mon_metadata{ceph_daemon="mon.a",ceph_version="ceph version + 17.0.0-189-g3558fd72 (3558fd7291855971aa6481a2ade468ad61fbb346) pacific + (dev)",hostname="ceph",instance="ceph:9283",job="ceph", + public_addr="172.20.0.2",rank="0"}' + values: '1 1 1 1 1' + - series: 'ceph_mon_metadata{ceph_daemon="mon.b",ceph_version="ceph version + 17.0.0-189-g3558fd72 (3558fd7291855971aa6481a2ade468ad61fbb346) pacific + (dev)",hostname="ceph",instance="ceph:9283",job="ceph", + public_addr="172.20.0.2",rank="1"}' + values: '1 1 1 1 1' + - series: 'ceph_mon_metadata{ceph_daemon="mon.c",ceph_version="ceph version + 17.0.0-189-g3558fd72 (3558fd7291855971aa6481a2ade468ad61fbb346) pacific + (dev)",hostname="ceph",instance="ceph:9283",job="ceph", + public_addr="172.20.0.2",rank="2"}' + values: '1 1 1 1 1' + promql_expr_test: + - expr: sum(ceph_mon_quorum_status) < 3 + eval_time: 1m + exp_samples: + - labels: '{}' + value: 2 + alert_rule_test: + - eval_time: 1m + alertname: low monitor quorum count + exp_alerts: + - exp_labels: + type: ceph_default + severity: critical + exp_annotations: + description: | + Monitor count in quorum is below three. + + Only 2 of 3 monitors are active. 
+ + The following monitors are down: + - mon.c on ceph + + + # 10% OSDs down + - interval: 1m + input_series: + - series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1' + - series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}' + values: '0 0 0 0 0' + - series: 'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1' + promql_expr_test: + - expr: count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10 + eval_time: 1m + exp_samples: + - labels: '{}' + value: 3.333333333333333E+01 + alert_rule_test: + - eval_time: 1m + alertname: 10% OSDs down + exp_alerts: + - exp_labels: + type: ceph_default + severity: critical + exp_annotations: + description: | + 33.33% or 1 of 3 OSDs are down (≥ 10%). 
+ + The following OSDs are down: + - osd.1 on ceph + + # OSD down + - interval: 1m + input_series: + - series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}' + values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' + - series: 'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + promql_expr_test: + - expr: count(ceph_osd_up == 0) > 0 + eval_time: 1m + exp_samples: + - labels: '{}' + value: 1 + alert_rule_test: + - eval_time: 15m + alertname: OSD down + exp_alerts: + - exp_labels: + type: ceph_default + severity: warning + exp_annotations: + description: | + + 1 OSD down for more than 15 minutes. + + 1 of 3 OSDs are down. 
+ + The following OSD is down: + - osd.1 on ceph + + # OSDs near full + - interval: 1m + input_series: + - series: 'ceph_osd_stat_bytes_used{ceph_daemon="osd.0",instance="ceph:9283" + ,job="ceph"}' + values: '1076310016 1076310016 1076310016 1076310016 1076310016 + 1076310016' + - series: 'ceph_osd_stat_bytes_used{ceph_daemon="osd.1",instance="ceph:9283" + ,job="ceph"}' + values: '1076310016 1076310016 1076310016 1076310016 1076310016 + 1076310016' + - series: 'ceph_osd_stat_bytes_used{ceph_daemon="osd.2",instance="ceph:9283" + ,job="ceph"}' + values: '1076310016 1076310016 1076310016 1076310016 1076310016 + 100856561909.76' + - series: 'ceph_osd_stat_bytes{ceph_daemon="osd.0",instance="ceph:9283" + ,job="ceph"}' + values: '108447916032 108447916032 108447916032 108447916032 108447916032 + 108447916032' + - series: 'ceph_osd_stat_bytes{ceph_daemon="osd.1",instance="ceph:9283" + ,job="ceph"}' + values: '108447916032 108447916032 108447916032 108447916032 108447916032 + 108447916032' + - series: 'ceph_osd_stat_bytes{ceph_daemon="osd.2",instance="ceph:9283" + ,job="ceph"}' + values: '108447916032 108447916032 108447916032 108447916032 108447916032 + 108447916032' + - series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + promql_expr_test: + - expr: | + ( + ((ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) and on(ceph_daemon) + ceph_osd_up == 1) * on(ceph_daemon) group_left(hostname) + ceph_osd_metadata + ) * 100 > 90 + + eval_time: 5m + exp_samples: + - labels: '{ceph_daemon="osd.2",hostname="ceph",instance="ceph:9283", + job="ceph"}' + value: 9.3E+01 + alert_rule_test: + - eval_time: 10m + alertname: OSDs near full + exp_alerts: + - exp_labels: + ceph_daemon: osd.2 + hostname: ceph + instance: ceph:9283 + job: ceph + type: ceph_default + severity: critical + exp_annotations: + description: > + OSD osd.2 on ceph is dangerously full: 93% + + # flapping OSD + - interval: 1s + input_series: + - series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}' + values: '1+1x100' + - series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}' + values: '1+0x100' + - series: 
'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}' + values: '1+0x100' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + promql_expr_test: + - expr: | + ( + rate(ceph_osd_up[5m]) + * on(ceph_daemon) group_left(hostname) ceph_osd_metadata + ) * 60 > 1 + eval_time: 1m + exp_samples: + - labels: '{ceph_daemon="osd.0", hostname="ceph", instance="ceph:9283", + job="ceph"}' + value: 1.2200000000000001E+01 + alert_rule_test: + - eval_time: 5m + alertname: flapping OSD + exp_alerts: + - exp_labels: + ceph_daemon: osd.0 + hostname: ceph + instance: ceph:9283 + job: ceph + severity: warning + type: ceph_default + exp_annotations: + description: > + OSD osd.0 on ceph was + marked down and back up at 20.1 times once a + minute for 5 minutes. 
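+  # rate(ceph_osd_up[5m]) * 60 approximates up/down transitions per minute,
+  # so "> 1" flags more than one flap a minute; the synthetic 1+1x100
+  # counter for osd.0 above stands in for repeated flaps, while the flat
+  # osd.1/osd.2 series stay below the threshold.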
+ + # high pg count deviation + - interval: 1m + input_series: + - series: 'ceph_osd_numpg{ceph_daemon="osd.0",instance="ceph:9283", + job="ceph"}' + values: '100 100 100 100 100 160' + - series: 'ceph_osd_numpg{ceph_daemon="osd.1",instance="ceph:9283", + job="ceph"}' + values: '100 100 100 100 100 320' + - series: 'ceph_osd_numpg{ceph_daemon="osd.2",instance="ceph:9283", + job="ceph"}' + values: '100 100 100 100 100 160' + - series: 'ceph_osd_numpg{ceph_daemon="osd.3",instance="ceph:9283", + job="ceph"}' + values: '100 100 100 100 100 160' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.3", + ceph_version="ceph version 17.0.0-189-g3558fd72 + (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)", + cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0", + hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore", + public_addr="172.20.0.2"}' + values: '1 1 1 1 1 1' + promql_expr_test: + - expr: | + abs( + ( + (ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) + by (job) + ) / on (job) group_left avg(ceph_osd_numpg > 0) by (job) + ) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30 + + eval_time: 5m + exp_samples: + - labels: '{ceph_daemon="osd.1", hostname="ceph", instance="ceph:9283", + job="ceph"}' + value: 6E-01 + alert_rule_test: + - eval_time: 10m + alertname: high pg count deviation + exp_alerts: + - exp_labels: + ceph_daemon: osd.1 + hostname: ceph + instance: ceph:9283 + job: ceph + severity: warning + type: ceph_default + exp_annotations: + description: > + OSD osd.1 on ceph deviates + by more than 30% from average PG count. 
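+  # With osd.1 at 320 PGs and the other three OSDs at 160, the average is
+  # 200, so the relative deviation is |320 - 200| / 200 = 0.6 (the expected
+  # 6E-01 sample), well past the 30% threshold.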
+ + # pgs inactive + - interval: 1m + input_series: + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="1"}' + values: '1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="2"}' + values: '1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="3"}' + values: '1 1 1 1 1 1 1 1' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '1 1 1 1 1 1 1 1' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '32 32 32 32 32 32 32 32' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '33 32 32 32 32 33 33 32' + - series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '1 1 1 1 1 1 1 1 1' + - series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '32 32 32 32 32 32 32 32' + - series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '32 32 32 32 32 32 32 32' + promql_expr_test: + - expr: ceph_pool_metadata * on(pool_id,instance) group_left() + (ceph_pg_total - ceph_pg_active) > 0 + eval_time: 5m + exp_samples: + - labels: '{instance="ceph:9283", job="ceph", + name="device_health_metrics", + pool_id="3"}' + value: 1 + alert_rule_test: + - eval_time: 5m + alertname: pgs inactive + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + name: device_health_metrics + pool_id: 3 + severity: critical + type: ceph_default + exp_annotations: + description: > + 1 PGs have been inactive for more than 5 minutes in pool + device_health_metrics. + Inactive placement groups aren't able to serve read/write + requests. + + #pgs unclean + - interval: 1m + input_series: + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="1"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="2"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="3"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 + 32 32 32' + - series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 + 33 33' + - series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1' + - series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 + 32 32' + - series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 + 32 32' + promql_expr_test: + - expr: ceph_pool_metadata * on(pool_id,instance) group_left() + (ceph_pg_total - ceph_pg_clean) > 0 + eval_time: 15m + exp_samples: + - labels: '{instance="ceph:9283", job="ceph", + name="device_health_metrics", pool_id="3"}' + value: 1 + alert_rule_test: + - eval_time: 16m + alertname: pgs 
unclean + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + name: device_health_metrics + pool_id: 3 + severity: warning + type: ceph_default + exp_annotations: + description: > + 1 PGs haven't been clean for more than 15 minutes in pool + device_health_metrics. + Unclean PGs haven't been able to completely recover from a + previous failure. + + # root volume full + - interval: 1m + input_series: + - series: 'node_filesystem_avail_bytes{device="/dev/mapper/fedora_localhost + --live-home",fstype="ext4",instance="node-exporter",job="node-exporter", + mountpoint="/"}' + values: '35336400896 35336400896 35336400896 35336400896 35336400896 + 3525385519.104 3533640089' + - series: 'node_filesystem_size_bytes{device="/dev/mapper/fedora_localhost + --live-home",fstype="ext4",instance="node-exporter",job="node-exporter", + mountpoint="/"}' + values: '73445531648 73445531648 73445531648 73445531648 73445531648 + 73445531648 73445531648' + promql_expr_test: + - expr: node_filesystem_avail_bytes{mountpoint="/"} / + node_filesystem_size_bytes{mountpoint="/"} * 100 < 5 + eval_time: 5m + exp_samples: + - labels: '{device="/dev/mapper/fedora_localhost --live-home", + fstype="ext4", instance="node-exporter", job="node-exporter", + mountpoint="/"}' + value: 4.8E+00 + alert_rule_test: + - eval_time: 10m + alertname: root volume full + exp_alerts: + - exp_labels: + device: /dev/mapper/fedora_localhost --live-home + fstype: ext4 + instance: node-exporter + job: node-exporter + mountpoint: / + severity: critical + type: ceph_default + exp_annotations: + description: > + Root volume (OSD and MON store) is dangerously full: 4.811% free. + + # network packets dropped + - interval: 1s + input_series: + - series: 'node_network_receive_drop_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '1+1x500' + - series: 'node_network_transmit_drop_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '1+1x500' + promql_expr_test: + - expr: | + ( + increase(node_network_receive_drop_total{device!="lo"}[1m]) + + increase(node_network_transmit_drop_total{device!="lo"}[1m]) + ) / ( + increase(node_network_receive_packets_total{device!="lo"}[1m]) + + increase(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + increase(node_network_receive_drop_total{device!="lo"}[1m]) + + increase(node_network_transmit_drop_total{device!="lo"}[1m]) + ) >= 10 + + eval_time: 5m + exp_samples: + - labels: '{device="eth0", instance="node-exporter", + job="node-exporter"}' + value: 1.2E+02 + alert_rule_test: + - eval_time: 5m + alertname: network packets dropped + exp_alerts: + - exp_labels: + device: eth0 + instance: node-exporter + job: node-exporter + severity: warning + type: ceph_default + exp_annotations: + description: > + Node node-exporter experiences packet drop > 0.01% or > + 10 packets/s on interface eth0. 
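+  # No node_network_*_packets_total series exist in this fixture, so the
+  # percentage branch of the rule yields no samples; the alert fires via the
+  # absolute ">= 10" branch, roughly 60 rx + 60 tx drops over the 1m window,
+  # matching the expected value 1.2E+02.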
+ + # network packets errors + - interval: 1s + input_series: + - series: 'node_network_receive_errs_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '1+1x500' + - series: 'node_network_transmit_errs_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '1+1x500' + promql_expr_test: + - expr: | + ( + increase(node_network_receive_errs_total{device!="lo"}[1m]) + + increase(node_network_transmit_errs_total{device!="lo"}[1m]) + ) / ( + increase(node_network_receive_packets_total{device!="lo"}[1m]) + + increase(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + increase(node_network_receive_errs_total{device!="lo"}[1m]) + + increase(node_network_transmit_errs_total{device!="lo"}[1m]) + ) >= 10 + + eval_time: 5m + exp_samples: + - labels: '{device="eth0", instance="node-exporter", + job="node-exporter"}' + value: 1.2E+02 + alert_rule_test: + - eval_time: 5m + alertname: network packet errors + exp_alerts: + - exp_labels: + device: eth0 + instance: node-exporter + job: node-exporter + severity: warning + type: ceph_default + exp_annotations: + description: > + Node node-exporter experiences packet errors > 0.01% or > 10 + packets/s on interface eth0. + + # MTU Mismatch + - interval: 1m + input_series: + - series: 'node_network_mtu_bytes{device="eth0",instance="node-exporter", + job="node-exporter"}' + values: '1500 1500 1500 1500 1500' + - series: 'node_network_mtu_bytes{device="eth1",instance="node-exporter", + job="node-exporter"}' + values: '1500 1500 1500 1500 1500' + - series: 'node_network_mtu_bytes{device="eth2",instance="node-exporter", + job="node-exporter"}' + values: '1500 1500 1500 1500 1500' + - series: 'node_network_mtu_bytes{device="eth3",instance="node-exporter", + job="node-exporter"}' + values: '1500 1500 1500 1500 1500' + - series: 'node_network_mtu_bytes{device="eth4",instance="node-exporter", + job="node-exporter"}' + values: '9000 9000 9000 9000 9000' + promql_expr_test: + - expr: node_network_mtu_bytes{device!="lo"} != on() group_left() + (quantile(0.5, node_network_mtu_bytes{device!="lo"})) + eval_time: 1m + exp_samples: + - labels: '{__name__="node_network_mtu_bytes", device="eth4", + instance="node-exporter", job="node-exporter"}' + value: 9000 + alert_rule_test: + - eval_time: 1m + alertname: MTU Mismatch + exp_alerts: + - exp_labels: + device: eth4 + instance: node-exporter + job: node-exporter + oid: 1.3.6.1.4.1.50495.15.1.2.8.5 + severity: warning + type: ceph_default + exp_annotations: + description: > + Node node-exporter has a different MTU size (9000) + than the median value on device eth4. 
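+  # The MTU rule compares each non-loopback interface against the fleet-wide
+  # median (quantile 0.5); with four interfaces at 1500 and eth4 at 9000,
+  # the median is 1500 and only eth4 is reported.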
+ + # pool full + - interval: 1m + input_series: + - series: 'ceph_pool_stored{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '0 0 0 0 0 0 0 0 0' + - series: 'ceph_pool_stored{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '1850 1850 1850 1850 1850 1850 1850' + - series: 'ceph_pool_stored{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '900 900 23524 23524 23524 23524 23524 23524 + 23524' + - series: 'ceph_pool_max_avail{instance="ceph:9283",job="ceph",pool_id="1"}' + values: '106287063040 106287063040 106287063040 106287063040 106287063040 + 106287063040 106287063040' + - series: 'ceph_pool_max_avail{instance="ceph:9283",job="ceph",pool_id="2"}' + values: '106287063040 106287063040 106287063040 106287063040 106287063040 + 106287063040 106287063040' + - series: 'ceph_pool_max_avail{instance="ceph:9283",job="ceph",pool_id="3"}' + values: '37.5 37.5 37.5 37.5 37.5 37.5 37.5' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="device_health_metrics",pool_id="1"}' + values: '1 1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name=".rgw.root",pool_id="2"}' + values: '1 1 1 1 1 1 1 1 1' + - series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph", + name="default.rgw.log",pool_id="3"}' + values: '1 1 1 1 1 1 1 1 1' + promql_expr_test: + - expr: | + ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) + * on(pool_id) group_right ceph_pool_metadata * 100 > 90 + + eval_time: 1m + exp_samples: + - labels: '{instance="ceph:9283", job="ceph", name="default.rgw.log", + pool_id="3"}' + value: 9.6E+01 + alert_rule_test: + - eval_time: 2m + alertname: pool full + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + name: default.rgw.log + pool_id: 3 + severity: critical + type: ceph_default + exp_annotations: + description: Pool default.rgw.log at 96% capacity. 
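+  # At the 1m evaluation point pool_id 3 still reports stored=900 against
+  # max_avail=37.5, i.e. 900 / (900 + 37.5) = 96% (the expected 9.6E+01
+  # sample); utilization only climbs further once stored jumps to 23524.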
+ + # slow OSD ops + - interval : 1m + input_series: + - series: 'ceph_healthcheck_slow_ops{instance="ceph:9283",job="ceph"}' + values: '1+0x120' + promql_expr_test: + - expr: ceph_healthcheck_slow_ops > 0 + eval_time: 1m + exp_samples: + - labels: '{__name__="ceph_healthcheck_slow_ops", instance="ceph:9283", + job="ceph"}' + value: 1 + alert_rule_test: + - eval_time: 20m + alertname: Slow OSD Ops + exp_alerts: + - exp_labels: + instance: ceph:9283 + job: ceph + severity: warning + type: ceph_default + exp_annotations: + description: > + 1 OSD requests are taking too long to process + (osd_op_complaint_time exceeded) diff --git a/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml b/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml index 1e96e6d8a..4710c26f1 100644 --- a/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml +++ b/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml @@ -6,6 +6,7 @@ overrides: conf: osd: filestore flush min: 0 + osd heartbeat grace: 60 tasks: - check-counter: counters: diff --git a/ceph/qa/rbd/krbd_blkroset.t b/ceph/qa/rbd/krbd_blkroset.t index bbbd26aac..ee6b342c7 100644 --- a/ceph/qa/rbd/krbd_blkroset.t +++ b/ceph/qa/rbd/krbd_blkroset.t @@ -146,9 +146,7 @@ R/O, unpartitioned: $ blockdev --setrw $DEV .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw $DEV - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw $DEV # succeeds but effectively ignored $ blockdev --getro $DEV 1 $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none @@ -182,15 +180,11 @@ R/O, partitioned: $ blockdev --setrw ${DEV}p1 .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw ${DEV}p1 - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw ${DEV}p1 # succeeds but effectively ignored $ blockdev --setrw ${DEV}p2 .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw ${DEV}p2 - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw ${DEV}p2 # succeeds but effectively ignored $ blockdev --getro ${DEV}p1 1 $ blockdev --getro ${DEV}p2 @@ -227,9 +221,7 @@ Unpartitioned: $ blockdev --setrw $DEV .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw $DEV - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw $DEV # succeeds but effectively ignored $ blockdev --getro $DEV 1 $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none @@ -263,15 +255,11 @@ Partitioned: $ blockdev --setrw ${DEV}p1 .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw ${DEV}p1 - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw ${DEV}p1 # succeeds but effectively ignored $ blockdev --setrw ${DEV}p2 .*BLKROSET: Permission denied (re) [1] - $ sudo blockdev --setrw ${DEV}p2 - .*BLKROSET: Read-only file system (re) - [1] + $ sudo blockdev --setrw ${DEV}p2 # succeeds but effectively ignored $ blockdev --getro ${DEV}p1 1 $ blockdev --getro ${DEV}p2 diff --git a/ceph/qa/standalone/crush/crush-classes.sh b/ceph/qa/standalone/crush/crush-classes.sh index 509585db7..558aabe6d 100755 --- a/ceph/qa/standalone/crush/crush-classes.sh +++ b/ceph/qa/standalone/crush/crush-classes.sh @@ -57,6 +57,34 @@ function get_osds_up() { echo $osds } +function TEST_reweight_vs_classes() { + local dir=$1 + + # CrushWrapper::update_item (and ceph osd crush set) must rebuild the shadow + # tree too. 
https://tracker.ceph.com/issues/48065 + + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + ceph osd crush set-device-class ssd osd.0 || return 1 + ceph osd crush class ls-osd ssd | grep 0 || return 1 + ceph osd crush set-device-class ssd osd.1 || return 1 + ceph osd crush class ls-osd ssd | grep 1 || return 1 + + ceph osd crush reweight osd.0 1 + + h=`hostname -s` + ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 65536 + ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 65536 + + ceph osd crush set 0 2 host=$h + + ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 131072 + ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 131072 +} + function TEST_classes() { local dir=$1 diff --git a/ceph/qa/standalone/misc/ok-to-stop.sh b/ceph/qa/standalone/misc/ok-to-stop.sh index 5465939e8..abecd685d 100755 --- a/ceph/qa/standalone/misc/ok-to-stop.sh +++ b/ceph/qa/standalone/misc/ok-to-stop.sh @@ -264,6 +264,8 @@ function TEST_0_osd() { ceph osd ok-to-stop 3 || return 1 ! ceph osd ok-to-stop 0 1 || return 1 ! ceph osd ok-to-stop 2 3 || return 1 + ceph osd ok-to-stop 0 --max 2 | grep '[0]' || return 1 + ceph osd ok-to-stop 1 --max 2 | grep '[1]' || return 1 # with min_size 2 we can stop 1 osds ceph osd pool set ec min_size 2 || return 1 @@ -274,6 +276,11 @@ function TEST_0_osd() { ! ceph osd ok-to-stop 0 1 2 || return 1 ! ceph osd ok-to-stop 1 2 3 || return 1 + ceph osd ok-to-stop 0 --max 2 | grep '[0,1]' || return 1 + ceph osd ok-to-stop 0 --max 20 | grep '[0,1]' || return 1 + ceph osd ok-to-stop 2 --max 2 | grep '[2,3]' || return 1 + ceph osd ok-to-stop 2 --max 20 | grep '[2,3]' || return 1 + # we should get the same result with one of the osds already down kill_daemons $dir TERM osd.0 || return 1 ceph osd down 0 || return 1 diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml index 1315980ed..fe5a964c9 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml @@ -2,7 +2,10 @@ overrides: ceph: conf: mgr: - debug client: 10 + debug mgr: 20 + debug ms: 1 + debug finisher: 20 + debug client: 20 log-whitelist: - OSD full dropping all updates - OSD near full diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml index 367206766..97757732f 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml @@ -7,4 +7,6 @@ tasks: mon.a: - ceph fs dump --format=json-pretty - ceph fs set cephfs min_compat_client mimic +- sleep: + duration: 5 - fs.clients_evicted: diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml index bdf484da6..070bfc7a1 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml @@ -7,6 +7,8 @@ tasks: mon.a: - ceph fs dump --format=json-pretty - ceph fs set cephfs min_compat_client mimic +- sleep: + 
duration: 5 - fs.clients_evicted: clients: client.0: False diff --git a/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml index 3eedd281e..fa75f29d6 100644 --- a/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml +++ b/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml @@ -3,6 +3,9 @@ overrides: log-whitelist: - SLOW_OPS - slow request + conf: + osd: + osd heartbeat grace: 60 tasks: - workunit: clients: diff --git a/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml index 53e74bea3..1f2dd88c6 100644 --- a/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml +++ b/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml @@ -6,6 +6,7 @@ overrides: conf: osd: filestore flush min: 0 + osd heartbeat grace: 60 tasks: - workunit: clients: diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/% b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/+ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/openstack.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/openstack.yaml new file mode 100644 index 000000000..b0f3b9b4d --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/start.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/start.yaml new file mode 100644 index 000000000..93f3c4714 --- /dev/null +++ 
b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/0-cluster/start.yaml @@ -0,0 +1,21 @@
+meta: +- desc: | + Install and run ceph on one node, + with a separate client 1. + Upgrade client 1 to pacific + Run tests against old cluster
+roles: +- - mon.a + - mon.b + - mon.c + - osd.0 + - osd.1 + - osd.2 + - client.0 + - mgr.x +- - client.1
+overrides: + ceph: + log-whitelist: + - failed to encode map + fs: xfs
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml new file mode 100644 index 000000000..ca53178ab --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml @@ -0,0 +1,11 @@
+tasks: +- install: + branch: nautilus + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados'] +- print: "**** done install nautilus" +- install.upgrade: + exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1', 'python-ceph'] + client.1: +- print: "**** done install.upgrade to -x on client.1" +- ceph: +- print: "**** done ceph task"
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/defaults.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/defaults.yaml new file mode 100644 index 000000000..dff6623ad --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/defaults.yaml @@ -0,0 +1,6 @@
+overrides: + ceph: + conf: + client: + rbd default features: 61 +
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/layering.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/layering.yaml new file mode 100644 index 000000000..5613d0155 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/2-features/layering.yaml @@ -0,0 +1,6 @@
+overrides: + ceph: + conf: + client: + rbd default features: 1 +
diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/.qa @@ -0,0 +1
@@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml new file mode 100644 index 000000000..7ad393a6a --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml @@ -0,0 +1,34 @@ +tasks: +- parallel: + - workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_master.sh + env: + RBD_FEATURES: "61" + - workunit: + branch: pacific + clients: + client.1: + - rbd/notify_slave.sh + env: + RBD_FEATURES: "61" + RBD_DISABLE_UPDATE_FEATURES: "1" +- print: "**** done rbd: old librbd -> new librbd" +- parallel: + - workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_slave.sh + env: + RBD_FEATURES: "61" + - workunit: + branch: pacific + clients: + client.1: + - rbd/notify_master.sh + env: + RBD_FEATURES: "61" +- print: "**** done rbd: new librbd -> old librbd" diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/.qa b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml new file mode 120000 index 000000000..886e87fa2 --- /dev/null +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-nautilus-pacific/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml index b25b51761..c2e78d3a0 100644 --- a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml +++ b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml @@ -1,9 +1,9 @@ meta: - desc: | Run ceph on two nodes, using one of them as a client, - with a separate client-only node. + with a separate client-only node. Use xfs beneath the osds. 
- install ceph/nautilus v14.2.2 point version + install ceph/nautilus v14.2.20 point version run workload and upgrade-sequence in parallel (every point release should be tested) run workload and upgrade-sequence in parallel
@@ -32,8 +32,8 @@ overrides: - cache pools at or near target size - filesystem is degraded - OBJECT_MISPLACED - ### ref: https://tracker.ceph.com/issues/40251 - #removed see ^ - failed to encode map + ### ref: https://tracker.ceph.com/issues/40251 + #removed see ^ + failed to encode map fs: xfs
@@ -46,7 +46,7 @@ overrides: osd: osd map max advance: 1000 osd_class_default_list: "*" - osd_class_load_list: "*" + osd_class_load_list: "*" client: rgw_crypt_require_ssl: false rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
@@ -70,19 +70,19 @@ openstack: size: 30 # GB tasks: # v14.2.0 removed per http://tracker.ceph.com/issues/40251 -- print: "**** done nautilus v14.2.2 about to install" +- print: "**** done nautilus v14.2.20 about to install" - install: - tag: v14.2.2 + tag: v14.2.20 # line below can be removed, it's from jewel test #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2'] -- print: "**** done v14.2.2 install" +- print: "**** done v14.2.20 install" - ceph: fs: xfs add_osds_to_crush: true - print: "**** done ceph xfs" - sequential: - workload -- print: "**** done workload v14.2.2" +- print: "**** done workload v14.2.20" # v14.2.1 removed per http://tracker.ceph.com/issues/40251
diff --git a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml index 75dc71fe9..7629a3b19 100644 --- a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml +++ b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml @@ -1,11 +1,11 @@ meta: -- desc: install ceph/nautilus v14.2.2 +- desc: install ceph/nautilus v14.2.20 tasks: - install: - tag: v14.2.2 + tag: v14.2.20 exclude_packages: ['librados3'] extra_packages: ['librados2'] -- print: "**** done install nautilus v14.2.2" +- print: "**** done install nautilus v14.2.20" - ceph: - exec: osd.0:
diff --git a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml index dbfde7f72..31462988a 100644 --- a/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml +++ b/ceph/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml @@ -3,7 +3,7 @@ meta: librbd python api tests tasks: - workunit: - tag: v14.2.10 + tag: v14.2.20 clients: client.0: - rbd/test_librbd_python.sh
diff --git a/ceph/qa/tasks/ceph_manager.py b/ceph/qa/tasks/ceph_manager.py index 3e1a2ec51..1117205fd 100644 --- a/ceph/qa/tasks/ceph_manager.py +++ b/ceph/qa/tasks/ceph_manager.py @@ -3,6 +3,7 @@ ceph manager -- Thrasher and CephManager objects """ from functools import wraps import contextlib +import errno import random import signal import time @@ -2560,13 +2561,22 @@ class CephManager: Loop until quorum size is reached.
""" self.log('waiting for quorum size %d' % size) - start = time.time() - while not len(self.get_mon_quorum()) == size: - if timeout is not None: - assert time.time() - start < timeout, \ - ('failed to reach quorum size %d ' - 'before timeout expired' % size) - time.sleep(3) + sleep = 3 + with safe_while(sleep=sleep, + tries=timeout // sleep, + action=f'wait for quorum size {size}') as proceed: + while proceed(): + try: + if len(self.get_mon_quorum()) == size: + break + except CommandFailedError as e: + # could fail instea4d of blocked if the rotating key of the + # connected monitor is not updated yet after they form the + # quorum + if e.exitstatus == errno.EACCES: + pass + else: + raise self.log("quorum is size %d" % size) def get_mon_health(self, debug=False): diff --git a/ceph/qa/tasks/cephfs/cephfs_test_case.py b/ceph/qa/tasks/cephfs/cephfs_test_case.py index f901f44ba..8136d4d5a 100644 --- a/ceph/qa/tasks/cephfs/cephfs_test_case.py +++ b/ceph/qa/tasks/cephfs/cephfs_test_case.py @@ -176,6 +176,9 @@ class CephFSTestCase(CephTestCase): for m in self.mounts: m.teardown() + # To prevent failover messages during Unwind of ceph task + self.mds_cluster.delete_all_filesystems() + for i, m in enumerate(self.mounts): m.client_id = self._original_client_ids[i] diff --git a/ceph/qa/tasks/cephfs/test_scrub_checks.py b/ceph/qa/tasks/cephfs/test_scrub_checks.py index 54ed16ffa..58661621b 100644 --- a/ceph/qa/tasks/cephfs/test_scrub_checks.py +++ b/ceph/qa/tasks/cephfs/test_scrub_checks.py @@ -6,6 +6,7 @@ import logging import errno import time from teuthology.exceptions import CommandFailedError +from teuthology.contextutil import safe_while import os from tasks.cephfs.cephfs_test_case import CephFSTestCase @@ -30,22 +31,46 @@ class TestScrubControls(CephFSTestCase): self.assertEqual(res['return_code'], expected) def _get_scrub_status(self): return self.fs.rank_tell(["scrub", "status"]) - def _check_task_status(self, expected_status): - task_status = self.fs.get_task_status("scrub status") - active = self.fs.get_active_names() - log.debug("current active={0}".format(active)) - self.assertTrue(task_status[active[0]].startswith(expected_status)) + def _check_task_status(self, expected_status, timo=120): + """ check scrub status for current active mds in ceph status """ + with safe_while(sleep=1, tries=120, action='wait for task status') as proceed: + while proceed(): + active = self.fs.get_active_names() + log.debug("current active={0}".format(active)) + task_status = self.fs.get_task_status("scrub status") + try: + if task_status[active[0]].startswith(expected_status): + return True + except KeyError: + pass + + def _check_task_status_na(self, timo=120): + """ check absence of scrub status in ceph status """ + with safe_while(sleep=1, tries=120, action='wait for task status') as proceed: + while proceed(): + active = self.fs.get_active_names() + log.debug("current active={0}".format(active)) + task_status = self.fs.get_task_status("scrub status") + if not active[0] in task_status: + return True + + def create_scrub_data(self, test_dir): + for i in range(32): + dirname = "dir.{0}".format(i) + dirpath = os.path.join(test_dir, dirname) + self.mount_a.run_shell_payload(f""" +set -e +mkdir -p {dirpath} +for ((i = 0; i < 32; i++)); do + dd if=/dev/urandom of={dirpath}/filename.$i bs=1M conv=fdatasync count=1 +done +""") def test_scrub_abort(self): test_dir = "scrub_control_test_path" abs_test_path = "/{0}".format(test_dir) - log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) - client_path = 
os.path.join(self.mount_a.mountpoint, test_dir) - log.info("client_path: {0}".format(client_path)) - - log.info("Cloning repo into place") - TestScrubChecks.clone_repo(self.mount_a, client_path) + self.create_scrub_data(test_dir) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -56,8 +81,8 @@ class TestScrubControls(CephFSTestCase): self.assertTrue("no active" in out_json['status']) # sleep enough to fetch updated task status - time.sleep(10) - self._check_task_status("idle") + checked = self._check_task_status_na() + self.assertTrue(checked) def test_scrub_pause_and_resume(self): test_dir = "scrub_control_test_path" @@ -67,8 +92,7 @@ class TestScrubControls(CephFSTestCase): client_path = os.path.join(self.mount_a.mountpoint, test_dir) log.info("client_path: {0}".format(client_path)) - log.info("Cloning repo into place") - _ = TestScrubChecks.clone_repo(self.mount_a, client_path) + self.create_scrub_data(test_dir) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -78,25 +102,22 @@ class TestScrubControls(CephFSTestCase): out_json = self._get_scrub_status() self.assertTrue("PAUSED" in out_json['status']) - # sleep enough to fetch updated task status - time.sleep(10) - self._check_task_status("paused") + checked = self._check_task_status("paused") + self.assertTrue(checked) # resume and verify self._resume_scrub(0) out_json = self._get_scrub_status() self.assertFalse("PAUSED" in out_json['status']) + checked = self._check_task_status_na() + self.assertTrue(checked) + def test_scrub_pause_and_resume_with_abort(self): test_dir = "scrub_control_test_path" abs_test_path = "/{0}".format(test_dir) - log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) - client_path = os.path.join(self.mount_a.mountpoint, test_dir) - log.info("client_path: {0}".format(client_path)) - - log.info("Cloning repo into place") - _ = TestScrubChecks.clone_repo(self.mount_a, client_path) + self.create_scrub_data(test_dir) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -106,9 +127,8 @@ class TestScrubControls(CephFSTestCase): out_json = self._get_scrub_status() self.assertTrue("PAUSED" in out_json['status']) - # sleep enough to fetch updated task status - time.sleep(10) - self._check_task_status("paused") + checked = self._check_task_status("paused") + self.assertTrue(checked) # abort and verify self._abort_scrub(0) @@ -116,26 +136,37 @@ class TestScrubControls(CephFSTestCase): self.assertTrue("PAUSED" in out_json['status']) self.assertTrue("0 inodes" in out_json['status']) - # sleep enough to fetch updated task status - time.sleep(10) - self._check_task_status("paused") + # scrub status should still be paused... 
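A note on the polling idiom that replaces the fixed time.sleep(10) calls in these tests: _check_task_status and _check_task_status_na retry under teuthology's safe_while until the mgr reports the expected scrub state. A minimal sketch of that idiom, illustrative only (wait_for and predicate are hypothetical names; safe_while is the real teuthology helper used above):

    from teuthology.contextutil import safe_while

    def wait_for(predicate, sleep=1, tries=120):
        # Poll predicate() up to `tries` times, `sleep` seconds apart.
        # proceed() raises MaxWhileTries once the budget is exhausted, so
        # callers get a hard failure rather than a silently stale sample.
        with safe_while(sleep=sleep, tries=tries,
                        action='wait for condition') as proceed:
            while proceed():
                if predicate():
                    return True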
+ checked = self._check_task_status("paused") + self.assertTrue(checked) # resume and verify self._resume_scrub(0) out_json = self._get_scrub_status() self.assertTrue("no active" in out_json['status']) - # sleep enough to fetch updated task status - time.sleep(10) - self._check_task_status("idle") + checked = self._check_task_status_na() + self.assertTrue(checked) def test_scrub_task_status_on_mds_failover(self): - # sleep enough to fetch updated task status - time.sleep(10) - (original_active, ) = self.fs.get_active_names() original_standbys = self.mds_cluster.get_standby_daemons() - self._check_task_status("idle") + + test_dir = "scrub_control_test_path" + abs_test_path = "/{0}".format(test_dir) + + self.create_scrub_data(test_dir) + + out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) + self.assertNotEqual(out_json, None) + + # pause and verify + self._pause_scrub(0) + out_json = self._get_scrub_status() + self.assertTrue("PAUSED" in out_json['status']) + + checked = self._check_task_status("paused") + self.assertTrue(checked) # Kill the rank 0 self.fs.mds_stop(original_active) @@ -150,12 +181,7 @@ class TestScrubControls(CephFSTestCase): original_standbys)) self.wait_until_true(promoted, timeout=grace*2) - mgr_beacon_grace = float(self.fs.get_config("mgr_service_beacon_grace", service_type="mon")) - - def status_check(): - task_status = self.fs.get_task_status("scrub status") - return original_active not in task_status - self.wait_until_true(status_check, timeout=mgr_beacon_grace*2) + self._check_task_status_na() class TestScrubChecks(CephFSTestCase): """ diff --git a/ceph/qa/tasks/cephfs/test_volume_client.py b/ceph/qa/tasks/cephfs/test_volume_client.py index 7f66218c5..2478955b9 100644 --- a/ceph/qa/tasks/cephfs/test_volume_client.py +++ b/ceph/qa/tasks/cephfs/test_volume_client.py @@ -562,6 +562,9 @@ vc.disconnect() self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")]) self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")]) + # A folder with non-ascii characters + self.mount_a.run_shell(["mkdir", os.path.join(mount_path, u"f\u00F6n")]) + self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", u"{volume_id}") vc.delete_volume(vp) diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index 67f138f87..778ad83d3 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -3649,6 +3649,48 @@ class TestVolumes(CephFSTestCase): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_snapshot_clone_retain_suid_guid(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # Create a file with suid, guid bits set along with executable bit. 
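For context on the mode bits the chmod below sets: u+sx,g+sx adds the setuid and setgid bits on top of owner and group execute, so a freshly touched 0644 file ends up as mode 06754, which the test then expects a snapshot clone to retain. A quick check of the arithmetic, illustrative only:

    import stat
    mode = 0o644                           # typical mode from touch (umask 022)
    mode |= stat.S_ISUID | stat.S_IXUSR    # u+sx
    mode |= stat.S_ISGID | stat.S_IXGRP    # g+sx
    assert mode == 0o6754                  # shown by ls as -rwsr-sr--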
+ args = ["subvolume", "getpath", self.volname, subvolume] + args = tuple(args) + subvolpath = self._fs_cmd(*args) + self.assertNotEqual(subvolpath, None) + subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline + + file_path = subvolpath + file_path = os.path.join(subvolpath, "test_suid_file") + self.mount_a.run_shell(["touch", file_path]) + self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path]) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + def test_subvolume_snapshot_reconf_max_concurrent_clones(self): """ Validate 'max_concurrent_clones' config option diff --git a/ceph/qa/tasks/mgr/dashboard/test_rgw.py b/ceph/qa/tasks/mgr/dashboard/test_rgw.py index 9e7811420..8baaf0565 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_rgw.py +++ b/ceph/qa/tasks/mgr/dashboard/test_rgw.py @@ -107,6 +107,16 @@ class RgwApiCredentialsTest(RgwTestCase): data['message']) +class RgwSiteTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + def test_get_realms(self): + data = self._get('/api/rgw/site?query=realms') + self.assertStatus(200) + self.assertSchema(data, JList(str)) + + class RgwBucketTest(RgwTestCase): AUTH_ROLES = ['rgw-manager'] diff --git a/ceph/qa/tasks/mgr/test_progress.py b/ceph/qa/tasks/mgr/test_progress.py index 8c06dd0e0..25a9f3764 100644 --- a/ceph/qa/tasks/mgr/test_progress.py +++ b/ceph/qa/tasks/mgr/test_progress.py @@ -166,8 +166,7 @@ class TestProgress(MgrTestCase): # Wait for a progress event to pop up self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, - timeout=self.EVENT_CREATION_PERIOD*2, - period=1) + timeout=self.EVENT_CREATION_PERIOD*2) ev = self._get_osd_in_out_events('out')[0] log.info(json.dumps(ev, indent=1)) self.assertIn("Rebalancing after osd.0 marked out", ev['message']) @@ -182,13 +181,12 @@ class TestProgress(MgrTestCase): # First Event should complete promptly self.wait_until_true(lambda: self._is_complete(initial_event['id']), - timeout=self.EVENT_CREATION_PERIOD) - + timeout=self.RECOVERY_PERIOD) + try: # Wait for progress event marked in to pop up self.wait_until_equal(lambda: self._osd_in_out_events_count('in'), 1, - timeout=self.EVENT_CREATION_PERIOD*2, - period=1) + timeout=self.EVENT_CREATION_PERIOD*2) except RuntimeError as ex: if not "Timed out after" in str(ex): raise ex @@ -261,7 +259,7 @@ class TestProgress(MgrTestCase): # Event should complete promptly self.wait_until_true(lambda: self._is_complete(ev['id']), - timeout=self.EVENT_CREATION_PERIOD) + timeout=self.RECOVERY_PERIOD) self.assertTrue(self._is_quiet()) def test_osd_came_back(self): @@ -274,10 +272,11 @@ class TestProgress(MgrTestCase): ev1 = self._simulate_failure() ev2 = self._simulate_back_in([0], ev1) - - # Wait for progress event to ultimately complete - self.wait_until_true(lambda: self._is_complete(ev2['id']), - timeout=self.RECOVERY_PERIOD) + + if ev2 is not None: + # Wait for progress event to ultimately complete + 
self.wait_until_true(lambda: self._is_complete(ev2['id']), + timeout=self.RECOVERY_PERIOD) self.assertTrue(self._is_quiet()) @@ -364,8 +363,8 @@ class TestProgress(MgrTestCase): 'osd', 'out', '0') # Wait for a progress event to pop up - self.wait_until_equal(lambda: len(self._all_events()), 1, - timeout=self.EVENT_CREATION_PERIOD*2) + self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, + timeout=self.RECOVERY_PERIOD) ev = self._all_events()[0] diff --git a/ceph/qa/tasks/userdata_setup.yaml b/ceph/qa/tasks/userdata_setup.yaml index 7271925c9..9aa2d0396 100644 --- a/ceph/qa/tasks/userdata_setup.yaml +++ b/ceph/qa/tasks/userdata_setup.yaml @@ -14,6 +14,8 @@ #!/usr/bin/env bash # mount a NFS share for storing logs + sed -i 's/archive.ubuntu.com/old-releases.ubuntu.com/' /etc/apt/sources.list + sed -i 's/security.ubuntu.com/old-releases.ubuntu.com/' /etc/apt/sources.list apt-get update apt-get -y install nfs-common mkdir /mnt/log diff --git a/ceph/qa/tasks/vstart_runner.py b/ceph/qa/tasks/vstart_runner.py index cfbaad782..eaeba38dd 100644 --- a/ceph/qa/tasks/vstart_runner.py +++ b/ceph/qa/tasks/vstart_runner.py @@ -298,6 +298,7 @@ class LocalRemote(object): stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd, + env=env, shell=True) else: # Sanity check that we've got a list of strings @@ -917,7 +918,9 @@ class LocalContext(object): self.daemons.daemons[prefixed_type][svc_id] = LocalDaemon(svc_type, svc_id) def __del__(self): - shutil.rmtree(self.teuthology_config['test_path']) + path = self.teuthology_config['test_path'] + if path is not None: + shutil.rmtree(path) def exec_test(): # Parse arguments diff --git a/ceph/qa/workunits/rbd/cli_generic.sh b/ceph/qa/workunits/rbd/cli_generic.sh index 7f44d932d..3f5c2fd45 100755 --- a/ceph/qa/workunits/rbd/cli_generic.sh +++ b/ceph/qa/workunits/rbd/cli_generic.sh @@ -485,21 +485,148 @@ test_purge() { echo "testing trash purge..." 
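The rewritten test_purge below leans on rbd's trash expiration options; the behaviour being exercised is roughly the following (an illustrative transcript, not part of the script):

    # "rbd trash mv --expires-at" sets a deferment window; "rbd trash purge"
    # removes only images whose window has elapsed, and --expired-before
    # moves the cutoff, e.g.:
    rbd trash mv img1 --expires-at "1 hour"
    rbd trash purge                                    # img1 kept: not expired
    rbd trash purge --expired-before "now + 2 hours"   # img1 removed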
remove_images + rbd trash ls | wc -l | grep 0 + rbd trash purge + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash ls | wc -l | grep 2 rbd trash purge rbd trash ls | wc -l | grep 0 - rbd create $RBD_CREATE_ARGS foo -s 1 - rbd create $RBD_CREATE_ARGS bar -s 1 + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd trash mv testimg1 --expires-at "1 hour" + rbd trash mv testimg2 --expires-at "3 hours" + rbd trash ls | wc -l | grep 2 + rbd trash purge + rbd trash ls | wc -l | grep 2 + rbd trash purge --expired-before "now + 2 hours" + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg2 + rbd trash purge --expired-before "now + 4 hours" + rbd trash ls | wc -l | grep 0 - rbd trash mv foo --expires-at "10 sec" - rbd trash mv bar --expires-at "30 sec" + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap # pin testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg1 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 - rbd trash purge --expired-before "now + 10 sec" - rbd trash ls | grep -v foo | wc -l | grep 1 - rbd trash ls | grep bar + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd snap create testimg2@snap # pin testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg2 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 - LAST_IMG=$(rbd trash ls | grep bar | awk '{print $1;}') - rbd trash rm $LAST_IMG --force --no-progress | grep -v '.' 
| wc -l | grep 0 + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg3@snap # pin testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg3 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 + + # test purging a clone with a chain of parents + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash mv testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg4 + rbd trash mv testimg6 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 2 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 3 + rbd trash purge + rbd trash ls | wc -l | grep 0 + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg3@snap # pin testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash mv testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 4 + rbd trash mv testimg6 + rbd trash ls | wc -l | grep 5 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg3 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg3 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 } test_deep_copy_clone() { diff --git a/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh b/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh index 4299078a1..34d550cea 100755 --- a/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh +++ b/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh @@ -1,12 +1,13 @@ #!/usr/bin/env bash -set -ex +# set -x +set -e # if 
defined, debug messages will be displayed and prepended with the string # debug="DEBUG" -huge_size=2222 # in megabytes -big_size=6 # in megabytes +huge_size=5100 # in megabytes +big_size=7 # in megabytes huge_obj=/tmp/huge_obj.temp.$$ big_obj=/tmp/big_obj.temp.$$ @@ -160,7 +161,6 @@ mys3uploadkill() { exit 1 fi - set -v local_file="$1" remote_bkt="$2" remote_obj="$3" @@ -229,8 +229,16 @@ mys3cmd ls s3://multipart-bkt bkt="incomplete-mp-bkt-1" mys3cmd mb s3://$bkt -mys3uploadkill $huge_obj $bkt incomplete-mp-obj-1 $fifo 20 -mys3uploadkill $huge_obj $bkt incomplete-mp-obj-2 $fifo 100 + +mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20 + +# generate an incomplete multipart with more than 1,000 parts +mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005 + +# generate more than 1000 incomplete multiparts +for c in $(seq 1005) ;do + mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3 +done #################################### # resharded bucket diff --git a/ceph/run-make-check.sh b/ceph/run-make-check.sh index 1927c4594..2bab63a4a 100755 --- a/ceph/run-make-check.sh +++ b/ceph/run-make-check.sh @@ -22,6 +22,10 @@ source src/script/run-make.sh set -e +function in_jenkins() { + test -n "$JENKINS_HOME" +} + function run() { # to prevent OSD EMFILE death on tests, make sure ulimit >= 1024 $DRY_RUN ulimit -n $(ulimit -Hn) @@ -35,9 +39,16 @@ function run() { $DRY_RUN sudo /sbin/sysctl -q -w fs.aio-max-nr=$((65536 * 16)) CHECK_MAKEOPTS=${CHECK_MAKEOPTS:-$DEFAULT_MAKEOPTS} - if ! $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure; then - rm -fr ${TMPDIR:-/tmp}/ceph-asok.* - return 1 + if in_jenkins; then + if ! ctest $CHECK_MAKEOPTS --no-compress-output --output-on-failure -T Test; then + # do not return failure, as the jenkins publisher will take care of this + rm -fr ${TMPDIR:-/tmp}/ceph-asok.* + fi + else + if ! $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure; then + rm -fr ${TMPDIR:-/tmp}/ceph-asok.* + return 1 + fi fi } diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 090470f4a..13fb220f6 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -36274af6eb7f2a5055f2d53ad448f2694e9046a0 -v14.2.20 +ca74598065096e6fcbd8433c8779a2be0c889351 +v14.2.22 diff --git a/ceph/src/CMakeLists.txt b/ceph/src/CMakeLists.txt index f6a97ba15..ed4bbc75d 100644 --- a/ceph/src/CMakeLists.txt +++ b/ceph/src/CMakeLists.txt @@ -401,7 +401,7 @@ target_link_libraries(ceph-common ${ceph_common_deps}) # appease dpkg-shlibdeps set_target_properties(ceph-common PROPERTIES SOVERSION 0 - INSTALL_RPATH "") + SKIP_RPATH TRUE) if(NOT APPLE AND NOT FREEBSD) # Apple uses Mach-O, not ELF. so this option does not apply to APPLE. 
# @@ -692,6 +692,12 @@ if(WITH_RBD) add_subdirectory(rbd_replay) endif(WITH_RBD) +if(WITH_BOOST_CONTEXT) + set(SPAWN_BUILD_TESTS OFF CACHE INTERNAL "disable building of spawn unit tests") + set(SPAWN_INSTALL OFF CACHE INTERNAL "disable installation of spawn headers") + add_subdirectory(spawn) +endif() + # RadosGW if(WITH_KVS) add_subdirectory(key_value_store) diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 30362f1bd..e4b932b80 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -1134,3 +1134,15 @@ def get_device_lvs(device, name_prefix=''): lvs = _output_parser(stdout, LV_FIELDS) return [Volume(**lv) for lv in lvs if lv['lv_name'] and lv['lv_name'].startswith(name_prefix)] + +def get_lv_by_fullname(full_name): + """ + returns LV by the specified LV's full name (formatted as vg_name/lv_name) + """ + try: + vg_name, lv_name = full_name.split('/') + res_lv = get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + except ValueError: + res_lv = None + return res_lv diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py index 40c0fea4e..114730ade 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -106,7 +106,7 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar requested_slots = fast_slots_per_device requested_size = getattr(args, '{}_size'.format(type_), 0) - if requested_size == 0: + if not requested_size or requested_size == 0: # no size argument was specified, check ceph.conf get_size_fct = getattr(prepare, 'get_{}_size'.format(type_)) requested_size = get_size_fct(lv_format=False) @@ -126,6 +126,7 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar if requested_size: if requested_size <= abs_size: abs_size = requested_size + relative_size = int(abs_size) / dev_size else: mlogger.error( '{} was requested for {}, but only {} can be fulfilled'.format( diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py index 3ef3c1117..39947454d 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py @@ -9,6 +9,7 @@ from . import trigger from . import listing from . import zap from . import batch +from . 
import migrate class LVM(object): @@ -30,6 +31,9 @@ class LVM(object): 'trigger': trigger.Trigger, 'list': listing.List, 'zap': zap.Zap, + 'migrate': migrate.Migrate, + 'new-wal': migrate.NewWAL, + 'new-db': migrate.NewDB, } def __init__(self, argv): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py new file mode 100644 index 000000000..c76a21802 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py @@ -0,0 +1,674 @@ +from __future__ import print_function +import argparse +import logging +import os +from textwrap import dedent +from ceph_volume.util import system, disk, merge_dict +from ceph_volume.util.device import Device +from ceph_volume import decorators, terminal, process +from ceph_volume.api import lvm as api +from ceph_volume.systemd import systemctl + + +logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) + +def get_cluster_name(osd_id, osd_fsid): + """ + From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the + system that match those tag values, then return cluster_name for the first + one. + """ + lv_tags = {} + lv_tags['ceph.osd_id'] = osd_id + lv_tags['ceph.osd_fsid'] = osd_fsid + + lvs = api.get_lvs(tags=lv_tags) + if not lvs: + mlogger.error( + 'Unable to find any LV for source OSD: id:{} fsid:{}'.format( + osd_id, osd_fsid) ) + raise SystemExit('Unexpected error, terminating') + return next(iter(lvs)).tags["ceph.cluster_name"] + +def get_osd_path(osd_id, osd_fsid): + return '/var/lib/ceph/osd/{}-{}'.format( + get_cluster_name(osd_id, osd_fsid), osd_id) + +def find_associated_devices(osd_id, osd_fsid): + """ + From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the + system that match those tag values, further detect if any partitions are + part of the OSD, and then return the set of LVs and partitions (if any). + """ + lv_tags = {} + lv_tags['ceph.osd_id'] = osd_id + lv_tags['ceph.osd_fsid'] = osd_fsid + + lvs = api.get_lvs(tags=lv_tags) + if not lvs: + mlogger.error( + 'Unable to find any LV for source OSD: id:{} fsid:{}'.format( + osd_id, osd_fsid) ) + raise SystemExit('Unexpected error, terminating') + + devices = set(ensure_associated_lvs(lvs, lv_tags)) + return [(Device(path), type) for path, type in devices if path] + +def ensure_associated_lvs(lvs, lv_tags): + """ + Go through each LV and ensure if backing devices (journal, wal, block) + are LVs or partitions, so that they can be accurately reported. + """ + # look for many LVs for each backing type, because it is possible to + # receive a filtering for osd.1, and have multiple failed deployments + # leaving many journals with osd.1 - usually, only a single LV will be + # returned + + block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'})) + db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'})) + wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'})) + backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'), + (wal_lvs, 'wal')] + + verified_devices = [] + + for lv in lvs: + # go through each lv and append it, otherwise query `blkid` to find + # a physical device. 
Do this for each type (journal,db,wal) regardless + # if they have been processed in the previous LV, so that bad devices + # with the same ID can be caught + for ceph_lvs, type in backing_devices: + + if ceph_lvs: + verified_devices.extend([(l.lv_path, type) for l in ceph_lvs]) + continue + + # must be a disk partition, by querying blkid by the uuid we are + # ensuring that the device path is always correct + try: + device_uuid = lv.tags['ceph.{}_uuid'.format(type)] + except KeyError: + # Bluestore will not have ceph.journal_uuid, and Filestore + # will not have ceph.db_uuid + continue + + osd_device = disk.get_device_from_partuuid(device_uuid) + if not osd_device: + # if the osd_device is not found by the partuuid, then it is + # not possible to ensure this device exists anymore, so skip it + continue + verified_devices.append((osd_device, type)) + + return verified_devices + +class VolumeTagTracker(object): + def __init__(self, devices, target_lv): + self.target_lv = target_lv + self.data_device = self.db_device = self.wal_device = None + for device, type in devices: + if type == 'block': + self.data_device = device + elif type == 'db': + self.db_device = device + elif type == 'wal': + self.wal_device = device + if not self.data_device: + mlogger.error('Data device not found') + raise SystemExit( + "Unexpected error, terminating") + if not self.data_device.is_lv: + mlogger.error('Data device isn\'t LVM') + raise SystemExit( + "Unexpected error, terminating") + + self.old_target_tags = self.target_lv.tags.copy() + self.old_data_tags = ( + self.data_device.lv_api.tags.copy() + if self.data_device.is_lv else None) + self.old_db_tags = ( + self.db_device.lv_api.tags.copy() + if self.db_device and self.db_device.is_lv else None) + self.old_wal_tags = ( + self.wal_device.lv_api.tags.copy() + if self.wal_device and self.wal_device.is_lv else None) + + def update_tags_when_lv_create(self, create_type): + tags = {} + if not self.data_device.is_lv: + mlogger.warning( + 'Data device is not LVM, wouldn\'t update LVM tags') + else: + tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path + self.data_device.lv_api.set_tags(tags) + + tags = self.data_device.lv_api.tags.copy() + tags["ceph.type"] = create_type + self.target_lv.set_tags(tags) + + aux_dev = None + if create_type == "db" and self.wal_device: + aux_dev = self.wal_device + elif create_type == "wal" and self.db_device: + aux_dev = self.db_device + else: + return + if not aux_dev.is_lv: + mlogger.warning( + '{} device is not LVM, wouldn\'t update LVM tags'.format( + create_type.upper())) + else: + tags = {} + tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path + aux_dev.lv_api.set_tags(tags) + + def remove_lvs(self, source_devices, target_type): + remaining_devices = [self.data_device, self.db_device, self.wal_device] + + outdated_tags = [] + for device, type in source_devices: + if type == "block" or type == target_type: + continue + remaining_devices.remove(device) + if device.is_lv: + outdated_tags.append("ceph.{}_uuid".format(type)) + outdated_tags.append("ceph.{}_device".format(type)) + device.lv_api.clear_tags() + if len(outdated_tags) > 0: + for d in remaining_devices: + if d and d.is_lv: + d.lv_api.clear_tags(outdated_tags) + + def replace_lvs(self, source_devices, target_type): + remaining_devices = [self.data_device] + if self.db_device: + 
remaining_devices.append(self.db_device) + if self.wal_device: + remaining_devices.append(self.wal_device) + + outdated_tags = [] + for device, type in source_devices: + if type == "block": + continue + remaining_devices.remove(device) + if device.is_lv: + outdated_tags.append("ceph.{}_uuid".format(type)) + outdated_tags.append("ceph.{}_device".format(type)) + device.lv_api.clear_tags() + + new_tags = {} + new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid + new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path + + for d in remaining_devices: + if d and d.is_lv: + if len(outdated_tags) > 0: + d.lv_api.clear_tags(outdated_tags) + d.lv_api.set_tags(new_tags) + + if not self.data_device.is_lv: + mlogger.warning( + 'Data device is not LVM, wouldn\'t properly update target LVM tags') + else: + tags = self.data_device.lv_api.tags.copy() + + tags["ceph.type"] = target_type + tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path + self.target_lv.set_tags(tags) + + def undo(self): + mlogger.info( + 'Undoing lv tag set') + if self.data_device: + if self.old_data_tags: + self.data_device.lv_api.set_tags(self.old_data_tags) + else: + self.data_device.lv_api.clear_tags() + if self.db_device: + if self.old_db_tags: + self.db_device.lv_api.set_tags(self.old_db_tags) + else: + self.db_device.lv_api.clear_tags() + if self.wal_device: + if self.old_wal_tags: + self.wal_device.lv_api.set_tags(self.old_wal_tags) + else: + self.wal_device.lv_api.clear_tags() + if self.old_target_tags: + self.target_lv.set_tags(self.old_target_tags) + else: + self.target_lv.clear_tags() + +class Migrate(object): + + help = 'Migrate BlueFS data to another LVM device' + + def __init__(self, argv): + self.argv = argv + self.osd_id = None + + def get_source_devices(self, devices, target_type=""): + ret = [] + for device, type in devices: + if type == target_type: + continue + if type == 'block': + if 'data' not in self.args.from_: + continue + elif type == 'db': + if 'db' not in self.args.from_: + continue + elif type == 'wal': + if 'wal' not in self.args.from_: + continue + ret.append([device, type]) + if ret == []: + mlogger.error('Source device list is empty') + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + return ret + + # ceph-bluestore-tool uses the following replacement rules + # (in the order of precedence, stop on the first match) + # if source list has DB volume - target device replaces it. + # if source list has WAL volume - target device replaces it. + # if source list has slow volume only - operation isn't permitted, + # requires explicit allocation via new-db/new-wal command. + def get_target_type_by_source(self, devices): + ret = None + for device, type in devices: + if type == 'db': + return 'db' + elif type == 'wal': + ret = 'wal' + return ret + + def get_filename_by_type(self, type): + filename = 'block' + if type == 'db' or type == 'wal': + filename += '.' 
+ type + return filename + + def get_source_args(self, osd_path, devices): + ret = [] + for device, type in devices: + ret = ret + ["--devs-source", os.path.join( + osd_path, self.get_filename_by_type(type))] + return ret + + @decorators.needs_root + def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv): + source_devices = self.get_source_devices(devices) + target_type = self.get_target_type_by_source(source_devices) + if not target_type: + mlogger.error( + "Unable to determine new volume type," + " please use new-db or new-wal command before.") + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + + target_path = target_lv.lv_path + + try: + tag_tracker = VolumeTagTracker(devices, target_lv) + # we need to update lvm tags for all the remaining volumes + # and clear for ones which to be removed + + # ceph-bluestore-tool removes source volume(s) other than block one + # and attaches target one after successful migration + tag_tracker.replace_lvs(source_devices, target_type) + + osd_path = get_osd_path(osd_id, osd_fsid) + source_args = self.get_source_args(osd_path, source_devices) + mlogger.info("Migrate to new, Source: {} Target: {}".format( + source_args, target_path)) + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_path, + '--command', + 'bluefs-bdev-migrate'] + + source_args) + if exit_code != 0: + mlogger.error( + 'Failed to migrate device, error code:{}'.format(exit_code)) + raise SystemExit( + 'Failed to migrate to : {}'.format(self.args.target)) + else: + system.chown(os.path.join(osd_path, "block.{}".format( + target_type))) + terminal.success('Migration successful.') + except: + tag_tracker.undo() + raise + + return + + @decorators.needs_root + def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv): + target_type = target_lv.tags["ceph.type"] + if target_type == "wal": + mlogger.error("Migrate to WAL is not supported") + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + target_filename = self.get_filename_by_type(target_type) + if (target_filename == ""): + mlogger.error( + "Target Logical Volume doesn't have proper volume type " + "(ceph.type LVM tag): {}".format(target_type)) + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + + osd_path = get_osd_path(osd_id, osd_fsid) + source_devices = self.get_source_devices(devices, target_type) + target_path = os.path.join(osd_path, target_filename) + tag_tracker = VolumeTagTracker(devices, target_lv) + + try: + # ceph-bluestore-tool removes source volume(s) other than + # block and target ones after successful migration + tag_tracker.remove_lvs(source_devices, target_type) + source_args = self.get_source_args(osd_path, source_devices) + mlogger.info("Migrate to existing, Source: {} Target: {}".format( + source_args, target_path)) + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_path, + '--command', + 'bluefs-bdev-migrate'] + + source_args) + if exit_code != 0: + mlogger.error( + 'Failed to migrate device, error code:{}'.format(exit_code)) + raise SystemExit( + 'Failed to migrate to : {}'.format(self.args.target)) + else: + terminal.success('Migration successful.') + except: + tag_tracker.undo() + raise + + return + + @decorators.needs_root + def migrate_osd(self): + if self.args.osd_id: + osd_is_running = systemctl.osd_is_active(self.args.osd_id) + if osd_is_running: + mlogger.error('OSD is running, 
stop it with: ' + 'systemctl stop ceph-osd@{}'.format( + self.args.osd_id)) + raise SystemExit( + 'Unable to migrate devices associated with OSD ID: {}' + .format(self.args.osd_id)) + + target_lv = api.get_lv_by_fullname(self.args.target) + if not target_lv: + mlogger.error( + 'Target path "{}" is not a Logical Volume'.format( + self.args.target)) + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid) + if (not target_lv.used_by_ceph): + self.migrate_to_new(self.args.osd_id, self.args.osd_fsid, + devices, + target_lv) + else: + if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or + target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid): + mlogger.error( + 'Target Logical Volume isn\'t used by the specified OSD: ' + '{} FSID: {}'.format(self.args.osd_id, + self.args.osd_fsid)) + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + + self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid, + devices, + target_lv) + + def parse_argv(self): + sub_command_help = dedent(""" + Moves BlueFS data from source volume(s) to the target one, source + volumes (except the main (i.e. data or block) one) are removed on + success. LVM volumes are permitted for Target only, both already + attached or new logical one. In the latter case it is attached to OSD + replacing one of the source devices. Following replacement rules apply + (in the order of precedence, stop on the first match): + * if source list has DB volume - target device replaces it. + * if source list has WAL volume - target device replaces it. + * if source list has slow volume only - operation is not permitted, + requires explicit allocation via new-db/new-wal command. + + Example calls for supported scenarios: + + Moves BlueFS data from main device to LV already attached as DB: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from data --target vgname/db + + Moves BlueFS data from shared main device to LV which will be attached + as a new DB: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from data --target vgname/new_db + + Moves BlueFS data from DB device to new LV, DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from db --target vgname/new_db + + Moves BlueFS data from main and DB devices to new LV, DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from data db --target vgname/new_db + + Moves BlueFS data from main, DB and WAL devices to new LV, WAL is + removed and DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from data db wal --target vgname/new_db + + Moves BlueFS data from main, DB and WAL devices to main device, WAL + and DB are removed: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid <fsid> --from db wal --target vgname/data + + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm migrate', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + '--osd-id', + required=True, + help='Specify an OSD ID to detect associated devices for migration', + ) + + parser.add_argument( + '--osd-fsid', + required=True, + help='Specify an OSD FSID to detect associated devices for migration', + ) + parser.add_argument( + '--target', + required=True, + help='Specify target Logical Volume (LV) to migrate data to', + ) + parser.add_argument( + '--from', + nargs='*', + dest='from_', + required=True, + choices=['data', 'db', 'wal'], + help='Copy BlueFS data from the given source device type(s) (data, db and/or wal)', + ) + + if len(self.argv) 
== 0: + print(sub_command_help) + return + self.args = parser.parse_args(self.argv) + + def main(self): + self.parse_argv() + self.migrate_osd() + +class NewVolume(object): + def __init__(self, create_type, argv): + self.create_type = create_type + self.argv = argv + + def make_parser(self, prog, sub_command_help): + parser = argparse.ArgumentParser( + prog=prog, + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + '--osd-id', + required=True, + help='Specify an OSD ID to attach new volume to', + ) + + parser.add_argument( + '--osd-fsid', + required=True, + help='Specify an OSD FSID to attach new volume to', + ) + parser.add_argument( + '--target', + required=True, + help='Specify target Logical Volume (LV) to attach', + ) + return parser + + @decorators.needs_root + def make_new_volume(self, osd_id, osd_fsid, devices, target_lv): + osd_path = get_osd_path(osd_id, osd_fsid) + mlogger.info( + 'Making new volume at {} for OSD: {} ({})'.format( + target_lv.lv_path, osd_id, osd_path)) + tag_tracker = VolumeTagTracker(devices, target_lv) + + try: + tag_tracker.update_tags_when_lv_create(self.create_type) + + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_lv.lv_path, + '--command', + 'bluefs-bdev-new-{}'.format(self.create_type) + ]) + if exit_code != 0: + mlogger.error( + 'failed to attach new volume, error code:{}'.format( + exit_code)) + raise SystemExit( + "Failed to attach new volume: {}".format( + self.args.target)) + else: + system.chown(os.path.join(osd_path, "block.{}".format( + self.create_type))) + terminal.success('New volume attached.') + except: + tag_tracker.undo() + raise + return + + @decorators.needs_root + def new_volume(self): + if self.args.osd_id: + osd_is_running = systemctl.osd_is_active(self.args.osd_id) + if osd_is_running: + mlogger.error('OSD is running, stop it with:' + ' systemctl stop ceph-osd@{}'.format(self.args.osd_id)) + raise SystemExit( + 'Unable to attach new volume for OSD: {}'.format( + self.args.osd_id)) + + target_lv = api.get_lv_by_fullname(self.args.target) + if not target_lv: + mlogger.error( + 'Target path {} is not a Logical Volume'.format( + self.args.target)) + raise SystemExit( + 'Unable to attach new volume : {}'.format(self.args.target)) + if target_lv.used_by_ceph: + mlogger.error( + 'Target Logical Volume is already used by ceph: {}'.format( + self.args.target)) + raise SystemExit( + 'Unable to attach new volume : {}'.format(self.args.target)) + else: + devices = find_associated_devices(self.args.osd_id, + self.args.osd_fsid) + self.make_new_volume( + self.args.osd_id, + self.args.osd_fsid, + devices, + target_lv) + +class NewWAL(NewVolume): + + help = 'Allocate new WAL volume for OSD at specified Logical Volume' + + def __init__(self, argv): + super(NewWAL, self).__init__("wal", argv) + + def main(self): + sub_command_help = dedent(""" + Attaches the given logical volume to the given OSD as a WAL volume. + Logical volume format is vg/lv. Fails if OSD has already got attached WAL. 
+ + Example: + + Attach vgname/lvname as a WAL volume to OSD 1 + + ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal + """) + parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help) + + if len(self.argv) == 0: + print(sub_command_help) + return + + self.args = parser.parse_args(self.argv) + + self.new_volume() + +class NewDB(NewVolume): + + help = 'Allocate new DB volume for OSD at specified Logical Volume' + + def __init__(self, argv): + super(NewDB, self).__init__("db", argv) + + def main(self): + sub_command_help = dedent(""" + Attaches the given logical volume to the given OSD as a DB volume. + Logical volume format is vg/lv. Fails if OSD has already got attached DB. + + Example: + + Attach vgname/lvname as a DB volume to OSD 1 + + ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db + """) + + parser = self.make_parser('ceph-volume lvm new-db', sub_command_help) + if len(self.argv) == 0: + print(sub_command_help) + return + self.args = parser.parse_args(self.argv) + + self.new_volume() diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py new file mode 100644 index 000000000..ef65e74e5 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py @@ -0,0 +1,1504 @@ +import pytest +from mock.mock import patch +from ceph_volume import process +from ceph_volume.api import lvm as api +from ceph_volume.devices.lvm import migrate +from ceph_volume.util.device import Device +from ceph_volume.util import system + +class TestGetClusterName(object): + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + def test_cluster_found(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234,ceph.cluster_name=name_of_the_cluster' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.get_cluster_name(osd_id='0', osd_fsid='1234') + assert "name_of_the_cluster" == result + + def test_cluster_not_found(self, monkeypatch, capsys): + self.mock_volumes = [] + self.mock_volumes.append([]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + with pytest.raises(SystemExit) as error: + migrate.get_cluster_name(osd_id='0', osd_fsid='1234') + stdout, stderr = capsys.readouterr() + expected = 'Unexpected error, terminating' + assert expected in str(error.value) + expected = 'Unable to find any LV for source OSD: id:0 fsid:1234' + assert expected in stderr + +class TestFindAssociatedDevices(object): + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + def test_lv_is_matched_id(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + 
self.mock_volumes.append([vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 1 + assert result[0][0].abspath == '/dev/VolGroup/lv1' + assert result[0][0].lvs == [vol] + assert result[0][1] == 'block' + + def test_lv_is_matched_id2(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234' + vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=tags2) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([vol2]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, '/dev/VolGroup/lv2': vol2} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 2 + for d in result: + if d[1] == 'block': + assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].lvs == [vol] + elif d[1] == 'wal': + assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].lvs == [vol2] + else: + assert False + + def test_lv_is_matched_id3(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234' + vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=tags2) + tags3 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=db,ceph.osd_fsid=1234' + vol3 = api.Volume(lv_name='volume3', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=tags3) + + self.mock_volumes = [] + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol3]) + self.mock_volumes.append([vol2]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, + '/dev/VolGroup/lv2': vol2, + '/dev/VolGroup/lv3': vol3} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 3 + for d in result: + if d[1] == 'block': + assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].lvs == [vol] + elif d[1] == 'wal': + assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].lvs == [vol2] + elif d[1] == 'db': + assert d[0].abspath == '/dev/VolGroup/lv3' + assert d[0].lvs == [vol3] + else: + assert False + + def test_lv_is_not_matched(self, monkeypatch, capsys): + self.mock_volumes = [None] + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', 
'', 0)) + + with pytest.raises(SystemExit) as error: + migrate.find_associated_devices(osd_id='1', osd_fsid='1234') + stdout, stderr = capsys.readouterr() + expected = 'Unexpected error, terminating' + assert expected in str(error.value) + expected = 'Unable to find any LV for source OSD: id:1 fsid:1234' + assert expected in stderr + +class TestVolumeTagTracker(object): + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + mock_process_input = [] + def mock_process(self, *args, **kwargs): + self.mock_process_input.append(args[0]); + return ('', '', 0) + + def test_init(self, monkeypatch): + source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234' + source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal' + target_tags="ceph.a=1,ceph.b=2,c=3,ceph.d=4" # 'c' to be bypassed + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags=target_tags, + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + assert 3 == len(t.old_target_tags) + + assert data_device == t.data_device + assert 4 == len(t.old_data_tags) + assert 'data' == t.old_data_tags['ceph.type'] + + assert db_device == t.db_device + assert 2 == len(t.old_db_tags) + assert 'db' == t.old_db_tags['ceph.type'] + + assert wal_device == t.wal_device + assert 3 == len(t.old_wal_tags) + assert 'wal' == t.old_wal_tags['ceph.type'] + + def test_update_tags_when_lv_create(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.journal_uuid=x,' \ + 'ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = \ + 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \ + 'osd_fsid=1234' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + + target = api.Volume(lv_name='target_name', lv_tags='', + lv_uuid='wal_uuid', + lv_path='/dev/VolGroup/lv_target') + 
t = migrate.VolumeTagTracker(devices, target); + + self.mock_process_input = [] + t.update_tags_when_lv_create('wal') + + assert 3 == len(self.mock_process_input) + + assert ['lvchange', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv1'].sort() == self.mock_process_input[0].sort() + + assert self.mock_process_input[1].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.journal_uuid=x', + '--addtag', 'ceph.type=wal', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv_target'].sort() + + assert ['lvchange', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv2'].sort() == self.mock_process_input[2].sort() + + def test_remove_lvs(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.journal_uuid=x,' \ + 'ceph.type=data,ceph.osd_fsid=1234,ceph.wal_uuid=aaaaa' + source_db_tags = \ + 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \ + 'osd_fsid=1234,ceph.wal_device=aaaaa' + source_wal_tags = \ + 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \ + 'ceph.osd_id=0,ceph.type=wal' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags='', + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + device_to_remove = devices[:] + + self.mock_process_input = [] + t.remove_lvs(device_to_remove, 'db') + + assert 3 == len(self.mock_process_input) + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=uuid', + '--deltag', 'ceph.wal_device=device', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '/dev/VolGroup/lv3'].sort() == self.mock_process_input[0].sort() + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=aaaaa', + '/dev/VolGroup/lv1'] == self.mock_process_input[1] + assert ['lvchange', + '--deltag', 'ceph.wal_device=aaaaa', + '/dev/VolGroup/lv2'] == self.mock_process_input[2] + + def test_replace_lvs(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\ + 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice' + source_db_tags = \ + 'ceph.osd_id=0,ceph.type=db,ceph.osd_fsid=1234' + source_wal_tags = \ + 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \ + 'ceph.osd_id=0,ceph.type=wal' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='dbuuid', vg_name='vg', + lv_path='/dev/VolGroup/lv2', 
lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='waluuid', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', + lv_uuid='ttt', + lv_tags='ceph.tag_to_remove=aaa', + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + self.mock_process_input = [] + t.replace_lvs(devices, 'db') + + assert 5 == len(self.mock_process_input) + + assert ['lvchange', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '/dev/VolGroup/lv2'].sort() == self.mock_process_input[0].sort() + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=uuid', + '--deltag', 'ceph.wal_device=device', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '/dev/VolGroup/lv3'].sort() == self.mock_process_input[1].sort() + assert ['lvchange', + '--deltag', 'ceph.db_device=/dbdevice', + '--deltag', 'ceph.wal_uuid=wal_uuid', + '/dev/VolGroup/lv1'].sort() == self.mock_process_input[2].sort() + + assert ['lvchange', + '--addtag', 'ceph.db_uuid=ttt', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv1'].sort() == self.mock_process_input[3].sort() + + assert self.mock_process_input[4].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.db_uuid=ttt', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv_target'].sort() + + def test_undo(self, monkeypatch): + source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234' + source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal' + target_tags="" + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags=target_tags, + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + target.tags['ceph.a'] = 'aa'; + target.tags['ceph.b'] = 'bb'; + + 
data_vol.tags['ceph.journal_uuid'] = 'z'; + + db_vol.tags.pop('ceph.type') + + wal_vol.tags.clear() + + assert 2 == len(target.tags) + assert 4 == len(data_vol.tags) + assert 1 == len(db_vol.tags) + + self.mock_process_input = [] + t.undo() + + assert 0 == len(target.tags) + assert 4 == len(data_vol.tags) + assert 'x' == data_vol.tags['ceph.journal_uuid'] + + assert 2 == len(db_vol.tags) + assert 'db' == db_vol.tags['ceph.type'] + + assert 3 == len(wal_vol.tags) + assert 'wal' == wal_vol.tags['ceph.type'] + + assert 6 == len(self.mock_process_input) + assert 'lvchange' in self.mock_process_input[0] + assert '--deltag' in self.mock_process_input[0] + assert 'ceph.journal_uuid=z' in self.mock_process_input[0] + assert '/dev/VolGroup/lv1' in self.mock_process_input[0] + + assert 'lvchange' in self.mock_process_input[1] + assert '--addtag' in self.mock_process_input[1] + assert 'ceph.journal_uuid=x' in self.mock_process_input[1] + assert '/dev/VolGroup/lv1' in self.mock_process_input[1] + + assert 'lvchange' in self.mock_process_input[2] + assert '--deltag' in self.mock_process_input[2] + assert 'ceph.osd_id=0' in self.mock_process_input[2] + assert '/dev/VolGroup/lv2' in self.mock_process_input[2] + + assert 'lvchange' in self.mock_process_input[3] + assert '--addtag' in self.mock_process_input[3] + assert 'ceph.type=db' in self.mock_process_input[3] + assert '/dev/VolGroup/lv2' in self.mock_process_input[3] + + assert 'lvchange' in self.mock_process_input[4] + assert '--addtag' in self.mock_process_input[4] + assert 'ceph.type=wal' in self.mock_process_input[4] + assert '/dev/VolGroup/lv3' in self.mock_process_input[4] + + assert 'lvchange' in self.mock_process_input[5] + assert '--deltag' in self.mock_process_input[5] + assert 'ceph.a=aa' in self.mock_process_input[5] + assert 'ceph.b=bb' in self.mock_process_input[5] + assert '/dev/VolGroup/lv_target' in self.mock_process_input[5] + +class TestNew(object): + + mock_volume = None + def mock_get_lv_by_fullname(self, *args, **kwargs): + return self.mock_volume + + mock_process_input = [] + def mock_process(self, *args, **kwargs): + self.mock_process_input.append(args[0]); + return ('', '', 0) + + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + def test_newdb_non_root(self): + with pytest.raises(Exception) as error: + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + expected = 'This command needs to be executed with sudo or as root' + assert expected in str(error.value) + + @patch('os.getuid') + def test_newdb_not_target_lvm(self, m_getuid, capsys): + m_getuid.return_value = 0 + with pytest.raises(SystemExit) as error: + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + stdout, stderr = capsys.readouterr() + expected = 'Unable to attach new volume : vgname/new_db' + assert expected in str(error.value) + expected = 'Target path vgname/new_db is not a Logical Volume' + assert expected in stderr + + + @patch('os.getuid') + def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys): + m_getuid.return_value = 0 + + self.mock_volume = api.Volume(lv_name='volume1', + lv_uuid='y', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags='ceph.osd_id=5') # this results in set 
used_by_ceph
+        monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+        with pytest.raises(SystemExit) as error:
+            migrate.NewDB(argv=[
+                '--osd-id', '1',
+                '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+                '--target', 'vgname/new_db']).main()
+        stdout, stderr = capsys.readouterr()
+        expected = 'Unable to attach new volume : vgname/new_db'
+        assert expected in str(error.value)
+        expected = 'Target Logical Volume is already used by ceph: vgname/new_db'
+        assert expected in stderr
+
+    @patch('os.getuid')
+    def test_newdb(self, m_getuid, monkeypatch, capsys):
+        m_getuid.return_value = 0
+
+        source_tags = \
+        'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+        'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+        source_wal_tags = \
+        'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+        'ceph.osd_id=0,ceph.type=wal'
+
+        data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+                              vg_name='vg',
+                              lv_path='/dev/VolGroup/lv1',
+                              lv_tags=source_tags)
+        wal_vol = api.Volume(lv_name='volume3',
+                             lv_uuid='waluuid',
+                             vg_name='vg',
+                             lv_path='/dev/VolGroup/lv3',
+                             lv_tags=source_wal_tags)
+
+        self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+                                    '/dev/VolGroup/lv3': wal_vol}
+
+        monkeypatch.setattr(migrate.api, 'get_first_lv',
+                            self.mock_get_first_lv)
+
+        self.mock_process_input = []
+        monkeypatch.setattr(process, 'call', self.mock_process)
+
+        self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+                                      vg_name='vg',
+                                      lv_path='/dev/VolGroup/target_volume',
+                                      lv_tags='')
+        monkeypatch.setattr(api, 'get_lv_by_fullname',
+                            self.mock_get_lv_by_fullname)
+
+        monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+                            lambda id: False)
+
+        # find_associated_devices will call get_lvs() 4 times
+        # and this needs the results to be arranged that way
+        self.mock_volumes = []
+        self.mock_volumes.append([data_vol, wal_vol])
+        self.mock_volumes.append([data_vol])
+        self.mock_volumes.append([])
+        self.mock_volumes.append([wal_vol])
+
+        monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+        monkeypatch.setattr(migrate, 'get_cluster_name',
+                            lambda osd_id, osd_fsid: 'ceph_cluster')
+        monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+        migrate.NewDB(argv=[
+            '--osd-id', '1',
+            '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+            '--target', 'vgname/new_db']).main()
+
+        n = len(self.mock_process_input)
+        assert n >= 5
+
+        assert self.mock_process_input[n - 5] == [
+            'lvchange',
+            '--deltag', 'ceph.db_device=/dbdevice',
+            '/dev/VolGroup/lv1']
+        assert sorted(self.mock_process_input[n - 4]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.db_uuid=y',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/lv1'])
+
+        assert sorted(self.mock_process_input[n - 3]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.wal_uuid=uuid',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=db',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.db_uuid=y',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+        assert sorted(self.mock_process_input[n - 2]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.db_uuid=y',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/lv3'])
+
+        assert self.mock_process_input[n - 1] == [
+            'ceph-bluestore-tool',
+            '--path', '/var/lib/ceph/osd/ceph_cluster-1',
+            '--dev-target', '/dev/VolGroup/target_volume',
+            '--command', 'bluefs-bdev-new-db']
+
+    @patch('os.getuid')
+    def test_newwal(self, m_getuid, monkeypatch, capsys):
+        m_getuid.return_value = 0
+
+        source_tags = \
+        'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+        data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+                              lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+        self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+        monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv)
+
+        self.mock_process_input = []
+        monkeypatch.setattr(process, 'call', self.mock_process)
+
+        self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+                                      lv_path='/dev/VolGroup/target_volume',
+                                      lv_tags='')
+        monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+        monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False)
+
+        # find_associated_devices will call get_lvs() 4 times
+        # and this needs the results to be arranged that way
+        self.mock_volumes = []
+        self.mock_volumes.append([data_vol])
+        self.mock_volumes.append([data_vol])
+        self.mock_volumes.append([])
+        self.mock_volumes.append([])
+
+        monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+        monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+        monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+        migrate.NewWAL(argv=[
+            '--osd-id', '2',
+            '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+            '--target', 'vgname/new_wal']).main()
+
+        n = len(self.mock_process_input)
+        assert n >= 3
+
+        assert sorted(self.mock_process_input[n - 3]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.wal_uuid=y',
+            '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/lv1'])
+
+        assert sorted(self.mock_process_input[n - 2]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=wal',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.wal_uuid=y',
+            '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+        assert self.mock_process_input[n - 1] == [
+            'ceph-bluestore-tool',
+            '--path', '/var/lib/ceph/osd/cluster-2',
+            '--dev-target', '/dev/VolGroup/target_volume',
+            '--command', 'bluefs-bdev-new-wal']
+
+class TestMigrate(object):
+
+    mock_volume = None
+    def mock_get_lv_by_fullname(self, *args, **kwargs):
+        return self.mock_volume
+
+    mock_process_input = []
+    def mock_process(self, *args, **kwargs):
+        self.mock_process_input.append(args[0]);
+        return ('', '', 0)
+
+    mock_single_volumes = {}
+    def mock_get_first_lv(self, *args, **kwargs):
+        p = kwargs['filters']['lv_path']
+        return self.mock_single_volumes[p]
+
+    mock_volumes = []
+    def mock_get_lvs(self, *args, **kwargs):
+        return self.mock_volumes.pop(0)
+
+    def test_get_source_devices(self, monkeypatch):
+
+        source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234'
+        source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234'
+        source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234'
+
+        data_vol = api.Volume(lv_name='volume1',
+                              lv_uuid='datauuid',
+                              vg_name='vg',
+                              lv_path='/dev/VolGroup/lv1',
+                              lv_tags=source_tags)
+        db_vol = api.Volume(lv_name='volume2',
+                            lv_uuid='datauuid',
+                            vg_name='vg',
+                            lv_path='/dev/VolGroup/lv2',
+                            lv_tags=source_db_tags)
+
+        wal_vol = api.Volume(lv_name='volume3',
+                             lv_uuid='datauuid',
+                             vg_name='vg',
+                             lv_path='/dev/VolGroup/lv3',
+                             lv_tags=source_wal_tags)
+
+        self.mock_single_volumes = {
+            '/dev/VolGroup/lv1': data_vol,
+            '/dev/VolGroup/lv2': db_vol,
+            '/dev/VolGroup/lv3': wal_vol,
+        }
+        monkeypatch.setattr(migrate.api, 'get_first_lv',
+                            self.mock_get_first_lv)
+
+        self.mock_volume =
api.Volume(lv_name='volume2', lv_uuid='y', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags='ceph.osd_id=5,ceph.osd_type=db') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--from', 'data', 'wal', + '--target', 'vgname/new_wal']) + m.parse_argv() + res_devices = m.get_source_devices(devices) + + assert 2 == len(res_devices) + assert devices[0] == res_devices[0] + assert devices[2] == res_devices[1] + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--from', 'db', 'wal', 'data', + '--target', 'vgname/new_wal']) + m.parse_argv() + res_devices = m.get_source_devices(devices) + + assert 3 == len(res_devices) + assert devices[0] == res_devices[0] + assert devices[1] == res_devices[1] + assert devices[2] == res_devices[2] + + + @patch('os.getuid') + def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + n = len(self.mock_process_input) + assert n >= 5 + + assert self. mock_process_input[n-5].sort() == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv2'].sort() + + assert self. 
mock_process_input[n-4].sort() == [
+            'lvchange',
+            '--deltag', 'ceph.db_uuid=dbuuid',
+            '--deltag', 'ceph.db_device=db_dev',
+            '/dev/VolGroup/lv1'].sort()
+
+        assert self. mock_process_input[n-3].sort() == [
+            'lvchange',
+            '--addtag', 'ceph.db_uuid=new-db-uuid',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+            '/dev/VolGroup/lv1'].sort()
+
+        assert self. mock_process_input[n-2].sort() == [
+            'lvchange',
+            '--addtag', 'ceph.osd_id=2',
+            '--addtag', 'ceph.type=db',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.cluster_name=ceph',
+            '--addtag', 'ceph.db_uuid=new-db-uuid',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+            '/dev/VolGroup/lv2_new'].sort()
+
+        assert self. mock_process_input[n-1].sort() == [
+            'ceph-bluestore-tool',
+            '--path', '/var/lib/ceph/osd/ceph-2',
+            '--dev-target', '/dev/VolGroup/lv2_new',
+            '--command', 'bluefs-bdev-migrate',
+            '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+            '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'].sort()
+
+    @patch('os.getuid')
+    def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch):
+        m_getuid.return_value = 0
+
+        source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+        source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+        source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+        data_vol = api.Volume(lv_name='volume1',
+                              lv_uuid='datauuid',
+                              vg_name='vg',
+                              lv_path='/dev/VolGroup/lv1',
+                              lv_tags=source_tags)
+        db_vol = api.Volume(lv_name='volume2',
+                            lv_uuid='dbuuid',
+                            vg_name='vg',
+                            lv_path='/dev/VolGroup/lv2',
+                            lv_tags=source_db_tags)
+
+        wal_vol = api.Volume(lv_name='volume3',
+                             lv_uuid='datauuid',
+                             vg_name='vg',
+                             lv_path='/dev/VolGroup/lv3',
+                             lv_tags=source_wal_tags)
+
+        self.mock_single_volumes = {
+            '/dev/VolGroup/lv1': data_vol,
+            '/dev/VolGroup/lv2': db_vol,
+            '/dev/VolGroup/lv3': wal_vol,
+        }
+        monkeypatch.setattr(migrate.api, 'get_first_lv',
+                            self.mock_get_first_lv)
+
+        self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+                                      vg_name='vg',
+                                      lv_path='/dev/VolGroup/lv2_new',
+                                      lv_tags='')
+        monkeypatch.setattr(api, 'get_lv_by_fullname',
+                            self.mock_get_lv_by_fullname)
+
+        self.mock_process_input = []
+        monkeypatch.setattr(process, 'call', self.mock_process)
+
+        devices = []
+        devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+        devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+        devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+        monkeypatch.setattr(migrate, 'find_associated_devices',
+                            lambda osd_id, osd_fsid: devices)
+
+        monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+                            lambda id: False)
+
+        monkeypatch.setattr(migrate, 'get_cluster_name',
+                            lambda osd_id, osd_fsid: 'ceph')
+        monkeypatch.setattr(system, 'chown', lambda path: 0)
+        m = migrate.Migrate(argv=[
+            '--osd-id', '2',
+            '--osd-fsid', '1234',
+            '--from', 'data', 'db',
+            '--target', 'vgname/new_wal'])
+        m.main()
+
+        n = len(self.mock_process_input)
+        assert n >= 7
+
+        assert self. mock_process_input[n-7].sort() == [
+            'lvchange',
+            '--deltag', 'ceph.osd_id=2',
+            '--deltag', 'ceph.type=db',
+            '--deltag', 'ceph.osd_fsid=1234',
+            '--deltag', 'ceph.cluster_name=ceph',
+            '--deltag', 'ceph.db_uuid=dbuuid',
+            '--deltag', 'ceph.db_device=db_dev',
+            '/dev/VolGroup/lv2'].sort()
+
+        assert self.
mock_process_input[n-6].sort() == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv1'].sort() + + assert self. mock_process_input[n-5].sort() == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'].sort() + + assert self. mock_process_input[n-4].sort() == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv3'].sort() + + assert self. mock_process_input[n-3].sort() == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv3'].sort() + + assert self. mock_process_input[n-2].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'].sort() + + assert self. mock_process_input[n-1].sort() == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/VolGroup/lv2_new', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'].sort() + + @patch('os.getuid') + def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 
'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + n = len(self.mock_process_input) + assert n >= 6 + + assert self. mock_process_input[n-6].sort() == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv2'].sort() + + assert self. mock_process_input[n-5].sort() == [ + 'lvchange', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv3'].sort() + + assert self. mock_process_input[n-4].sort() == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'].sort() + + assert self. mock_process_input[n-3].sort() == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'].sort() + + assert self. mock_process_input[n-2].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'].sort() + + assert self. mock_process_input[n-1].sort() == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/VolGroup/lv2_new', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'].sort() + + @patch('os.getuid') + def test_dont_migrate_data_db_wal_to_new_data(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') 
+        monkeypatch.setattr(system, 'chown', lambda path: 0)
+        m = migrate.Migrate(argv=[
+            '--osd-id', '2',
+            '--osd-fsid', '1234',
+            '--from', 'data',
+            '--target', 'vgname/new_data'])
+
+        with pytest.raises(SystemExit) as error:
+            m.main()
+        stdout, stderr = capsys.readouterr()
+        expected = 'Unable to migrate to : vgname/new_data'
+        assert expected in str(error.value)
+        expected = 'Unable to determine new volume type,' \
+            ' please use new-db or new-wal command before.'
+        assert expected in stderr
+
+    @patch('os.getuid')
+    def test_dont_migrate_db_to_wal(self,
+                                    m_getuid,
+                                    monkeypatch,
+                                    capsys):
+        m_getuid.return_value = 0
+
+        source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+        'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+        source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+        source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+        'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+        data_vol = api.Volume(lv_name='volume1',
+                              lv_uuid='datauuid',
+                              vg_name='vg',
+                              lv_path='/dev/VolGroup/lv1',
+                              lv_tags=source_tags)
+        db_vol = api.Volume(lv_name='volume2',
+                            lv_uuid='dbuuid',
+                            vg_name='vg',
+                            lv_path='/dev/VolGroup/lv2',
+                            lv_tags=source_db_tags)
+
+        wal_vol = api.Volume(lv_name='volume3',
+                             lv_uuid='waluuid',
+                             vg_name='vg',
+                             lv_path='/dev/VolGroup/lv3',
+                             lv_tags=source_wal_tags)
+
+        self.mock_single_volumes = {
+            '/dev/VolGroup/lv1': data_vol,
+            '/dev/VolGroup/lv2': db_vol,
+            '/dev/VolGroup/lv3': wal_vol,
+        }
+        monkeypatch.setattr(migrate.api, 'get_first_lv',
+                            self.mock_get_first_lv)
+
+        self.mock_volume = wal_vol
+        monkeypatch.setattr(api, 'get_lv_by_fullname',
+                            self.mock_get_lv_by_fullname)
+
+        self.mock_process_input = []
+        monkeypatch.setattr(process, 'call', self.mock_process)
+
+        devices = []
+        devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+        devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+        devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+        monkeypatch.setattr(migrate, 'find_associated_devices',
+                            lambda osd_id, osd_fsid: devices)
+
+        monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+                            lambda id: False)
+
+        monkeypatch.setattr(migrate, 'get_cluster_name',
+                            lambda osd_id, osd_fsid: 'ceph')
+        monkeypatch.setattr(system, 'chown', lambda path: 0)
+        m = migrate.Migrate(argv=[
+            '--osd-id', '2',
+            '--osd-fsid', '1234',
+            '--from', 'db',
+            '--target', 'vgname/wal'])
+
+        with pytest.raises(SystemExit) as error:
+            m.main()
+        stdout, stderr = capsys.readouterr()
+        expected = 'Unable to migrate to : vgname/wal'
+        assert expected in str(error.value)
+        expected = 'Migrate to WAL is not supported'
+        assert expected in stderr
+
+    @patch('os.getuid')
+    def test_migrate_data_db_to_db(self,
+                                   m_getuid,
+                                   monkeypatch,
+                                   capsys):
+        m_getuid.return_value = 0
+
+        source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+        'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+        source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+        source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+        'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+        'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+        data_vol =
api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = db_vol + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', 'data', + '--target', 'vgname/db']) + + m.main() + + n = len(self.mock_process_input) + assert n >= 1 + for s in self.mock_process_input: + print(s) + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block'] + + @patch('os.getuid') + def test_migrate_data_wal_to_db(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = db_vol + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + 
monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', 'data', 'wal', + '--target', 'vgname/db']) + + m.main() + + n = len(self.mock_process_input) + assert n >= 1 + for s in self.mock_process_input: + print(s) + + assert self. mock_process_input[n-4].sort() == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv3'].sort() + assert self. mock_process_input[n-3].sort() == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'].sort() + assert self. mock_process_input[n-2].sort() == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv2'].sort() + assert self. mock_process_input[n-1].sort() == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'].sort() diff --git a/ceph/src/ceph-volume/ceph_volume/util/disk.py b/ceph/src/ceph-volume/ceph_volume/util/disk.py index 2cf18cb52..f222109b8 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/disk.py +++ b/ceph/src/ceph-volume/ceph_volume/util/disk.py @@ -24,7 +24,7 @@ def get_partuuid(device): device """ out, err, rc = process.call( - ['blkid', '-s', 'PARTUUID', '-o', 'value', device] + ['blkid', '-c', '/dev/null', '-s', 'PARTUUID', '-o', 'value', device] ) return ' '.join(out).strip() @@ -98,7 +98,7 @@ def blkid(device): PART_ENTRY_UUID PARTUUID """ out, err, rc = process.call( - ['blkid', '-p', device] + ['blkid', '-c', '/dev/null', '-p', device] ) return _blkid_parser(' '.join(out)) @@ -110,7 +110,7 @@ def get_part_entry_type(device): used for udev rules, but it is useful in this case as it is the only consistent way to retrieve the GUID used by ceph-disk to identify devices. 
""" - out, err, rc = process.call(['blkid', '-p', '-o', 'udev', device]) + out, err, rc = process.call(['blkid', '-c', '/dev/null', '-p', '-o', 'udev', device]) for line in out: if 'ID_PART_ENTRY_TYPE=' in line: return line.split('=')[-1].strip() @@ -123,7 +123,7 @@ def get_device_from_partuuid(partuuid): device is """ out, err, rc = process.call( - ['blkid', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device'] + ['blkid', '-c', '/dev/null', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device'] ) return ' '.join(out).strip() diff --git a/ceph/src/ceph.in b/ceph/src/ceph.in index 28562eccb..f281aa18a 100755 --- a/ceph/src/ceph.in +++ b/ceph/src/ceph.in @@ -1213,18 +1213,6 @@ def main(): errno.errorcode.get(ret, 'Unknown'), outs), file=sys.stderr) - if ret < 0: - ret = -ret - errstr = errno.errorcode.get(ret, 'Unknown') - print(u'Error {0}: {1}'.format(errstr, outs), file=sys.stderr) - if len(targets) > 1: - final_ret = ret - else: - return ret - - if outs: - print(prefix + outs, file=sys.stderr) - sys.stdout.flush() if parsed_args.output_file: @@ -1250,12 +1238,23 @@ def main(): except IOError as e: if e.errno != errno.EPIPE: raise e + final_e = None try: sys.stdout.flush() except IOError as e: if e.errno != errno.EPIPE: - raise e + final_e = e + if ret < 0: + ret = -ret + errstr = errno.errorcode.get(ret, 'Unknown') + print(u'Error {0}: {1}'.format(errstr, outs), file=sys.stderr) + final_ret = ret + elif outs: + print(prefix + outs, file=sys.stderr) + + if final_e: + raise final_e # Block until command completion (currently scrub and deep_scrub only) if block: diff --git a/ceph/src/ceph_mon.cc b/ceph/src/ceph_mon.cc index 24bfe1b7c..61804469f 100644 --- a/ceph/src/ceph_mon.cc +++ b/ceph/src/ceph_mon.cc @@ -109,6 +109,14 @@ int obtain_monmap(MonitorDBStore &store, bufferlist &bl) } } + if (store.exists("mon_sync", "temp_newer_monmap")) { + dout(10) << __func__ << " found temp_newer_monmap" << dendl; + int err = store.get("mon_sync", "temp_newer_monmap", bl); + ceph_assert(err == 0); + ceph_assert(bl.length() > 0); + return 0; + } + if (store.exists("mkfs", "monmap")) { dout(10) << __func__ << " found mkfs monmap" << dendl; int err = store.get("mkfs", "monmap", bl); diff --git a/ceph/src/client/Client.cc b/ceph/src/client/Client.cc index 22bd81dbb..11d7f7285 100755 --- a/ceph/src/client/Client.cc +++ b/ceph/src/client/Client.cc @@ -125,12 +125,24 @@ #define DEBUG_GETATTR_CAPS (CEPH_CAP_XATTR_SHARED) +#ifndef S_IXUGO +#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH) +#endif + void client_flush_set_callback(void *p, ObjectCacher::ObjectSet *oset) { Client *client = static_cast(p); client->flush_set_callback(oset); } +bool Client::is_reserved_vino(vinodeno_t &vino) { + if (MDS_IS_PRIVATE_INO(vino.ino)) { + ldout(cct, -1) << __func__ << " attempt to access reserved inode number " << vino << dendl; + return true; + } + return false; +} + // ------------- @@ -3192,7 +3204,7 @@ void Client::put_cap_ref(Inode *in, int cap) int put_nref = 0; int drop = last & ~in->caps_issued(); if (in->snapid == CEPH_NOSNAP) { - if ((last & CEPH_CAP_FILE_WR) && + if ((last & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) && !in->cap_snaps.empty() && in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << __func__ << " finishing pending cap_snap on " << *in << dendl; @@ -3661,9 +3673,9 @@ void Client::queue_cap_snap(Inode *in, SnapContext& old_snapc) capsnap.context = old_snapc; capsnap.issued = in->caps_issued(); capsnap.dirty = in->caps_dirty(); - + capsnap.dirty_data = (used & CEPH_CAP_FILE_BUFFER); - + capsnap.uid = in->uid; 
capsnap.gid = in->gid; capsnap.mode = in->mode; @@ -3672,7 +3684,7 @@ void Client::queue_cap_snap(Inode *in, SnapContext& old_snapc) capsnap.xattr_version = in->xattr_version; capsnap.cap_dirtier_uid = in->cap_dirtier_uid; capsnap.cap_dirtier_gid = in->cap_dirtier_gid; - + if (used & CEPH_CAP_FILE_WR) { ldout(cct, 10) << __func__ << " WR used on " << *in << dendl; capsnap.writing = 1; @@ -3707,6 +3719,7 @@ void Client::finish_cap_snap(Inode *in, CapSnap &capsnap, int used) } if (used & CEPH_CAP_FILE_BUFFER) { + capsnap.writing = 1; ldout(cct, 10) << __func__ << " " << *in << " cap_snap " << &capsnap << " used " << used << " WRBUFFER, delaying" << dendl; } else { @@ -3715,13 +3728,6 @@ void Client::finish_cap_snap(Inode *in, CapSnap &capsnap, int used) } } -void Client::_flushed_cap_snap(Inode *in, snapid_t seq) -{ - ldout(cct, 10) << __func__ << " seq " << seq << " on " << *in << dendl; - in->cap_snaps.at(seq).dirty_data = 0; - flush_snaps(in); -} - void Client::send_flush_snap(Inode *in, MetaSession *session, snapid_t follows, CapSnap& capsnap) { @@ -3789,7 +3795,7 @@ void Client::flush_snaps(Inode *in) << " on " << *in << dendl; if (capsnap.dirty_data || capsnap.writing) break; - + capsnap.flush_tid = ++last_flush_tid; session->flushing_caps_tids.insert(capsnap.flush_tid); in->flushing_cap_tids[capsnap.flush_tid] = 0; @@ -4336,7 +4342,7 @@ void Client::trim_caps(MetaSession *s, uint64_t max) ++q; if (dn->lru_is_expireable()) { if (can_invalidate_dentries && - dn->dir->parent_inode->ino == MDS_INO_ROOT) { + dn->dir->parent_inode->ino == CEPH_INO_ROOT) { // Only issue one of these per DN for inodes in root: handle // others more efficiently by calling for root-child DNs at // the end of this function. @@ -4349,10 +4355,10 @@ void Client::trim_caps(MetaSession *s, uint64_t max) all = false; } } - if (in->ll_ref == 1 && in->ino != MDS_INO_ROOT) { + if (in->ll_ref == 1 && in->ino != CEPH_INO_ROOT) { _schedule_ino_release_callback(in.get()); } - if (all && in->ino != MDS_INO_ROOT) { + if (all && in->ino != CEPH_INO_ROOT) { ldout(cct, 20) << __func__ << " counting as trimmed: " << *in << dendl; trimmed++; } @@ -4738,25 +4744,19 @@ void Client::update_snap_trace(const bufferlist& bl, SnapRealm **realm_ret, bool ldout(cct, 10) << __func__ << " " << *realm << " seq " << info.seq() << " <= " << realm->seq << " and same parent, SKIPPING" << dendl; } - + if (!first_realm) first_realm = realm; else put_snap_realm(realm); } - for (map::iterator q = dirty_realms.begin(); - q != dirty_realms.end(); - ++q) { - SnapRealm *realm = q->first; + for (auto &[realm, snapc] : dirty_realms) { // if there are new snaps ? 
- if (has_new_snaps(q->second, realm->get_snap_context())) { + if (has_new_snaps(snapc, realm->get_snap_context())) { ldout(cct, 10) << " flushing caps on " << *realm << dendl; - xlist::iterator r = realm->inodes_with_caps.begin(); - while (!r.end()) { - Inode *in = *r; - ++r; - queue_cap_snap(in, q->second); + for (auto&& in : realm->inodes_with_caps) { + queue_cap_snap(in, snapc); } } else { ldout(cct, 10) << " no new snap on " << *realm << dendl; @@ -5383,8 +5383,12 @@ void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const M int Client::inode_permission(Inode *in, const UserPerm& perms, unsigned want) { - if (perms.uid() == 0) + if (perms.uid() == 0) { + // Executable are overridable when there is at least one exec bit set + if((want & MAY_EXEC) && !(in->mode & S_IXUGO)) + return -EACCES; return 0; + } if (perms.uid() != in->uid && (in->mode & S_IRWXG)) { int ret = _posix_acl_permission(in, perms, want); @@ -8659,33 +8663,44 @@ int Client::lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name, * the resulting Inode object in one operation, so that caller * can safely assume inode will still be there after return. */ -int Client::_lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode) +int Client::_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode) { - ldout(cct, 8) << __func__ << " enter(" << ino << ")" << dendl; + ldout(cct, 8) << __func__ << " enter(" << vino << ")" << dendl; if (unmounting) return -ENOTCONN; + if (is_reserved_vino(vino)) + return -ESTALE; + MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPINO); - filepath path(ino); + filepath path(vino.ino); req->set_filepath(path); + /* + * The MDS expects either a "real" snapid here or 0. The special value + * carveouts for the snapid are all at the end of the range so we can + * just look for any snapid below this value. 
+ */ + if (vino.snapid < CEPH_NOSNAP) + req->head.args.lookupino.snapid = vino.snapid; + int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds()); if (r == 0 && inode != NULL) { - vinodeno_t vino(ino, CEPH_NOSNAP); unordered_map::iterator p = inode_map.find(vino); ceph_assert(p != inode_map.end()); *inode = p->second; _ll_get(*inode); } - ldout(cct, 8) << __func__ << " exit(" << ino << ") = " << r << dendl; + ldout(cct, 8) << __func__ << " exit(" << vino << ") = " << r << dendl; return r; } int Client::lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode) { + vinodeno_t vino(ino, CEPH_NOSNAP); std::lock_guard lock(client_lock); - return _lookup_ino(ino, perms, inode); + return _lookup_vino(vino, perms, inode); } /** @@ -9055,8 +9070,15 @@ void Client::lock_fh_pos(Fh *f) void Client::unlock_fh_pos(Fh *f) { + ceph_assert(client_lock.is_locked_by_me()); + ldout(cct, 10) << __func__ << " " << f << dendl; f->pos_locked = false; + if (!f->pos_waiters.empty()) { + // only wake up the oldest waiter + auto cond = f->pos_waiters.front(); + cond->SignalOne(); + } } int Client::uninline_data(Inode *in, Context *onfinish) @@ -10805,56 +10827,59 @@ int Client::ll_lookup(Inode *parent, const char *name, struct stat *attr, return r; } +int Client::ll_lookup_vino( + vinodeno_t vino, + const UserPerm& perms, + Inode **inode) +{ + ceph_assert(inode != NULL); + + if (unmounting) + return -ENOTCONN; + + if (is_reserved_vino(vino)) + return -ESTALE; + + std::lock_guard lock(client_lock); + ldout(cct, 3) << __func__ << vino << dendl; + + // Check the cache first + unordered_map::iterator p = inode_map.find(vino); + if (p != inode_map.end()) { + *inode = p->second; + _ll_get(*inode); + return 0; + } + + uint64_t snapid = vino.snapid; + + // for snapdir, find the non-snapped dir inode + if (snapid == CEPH_SNAPDIR) + vino.snapid = CEPH_NOSNAP; + + int r = _lookup_vino(vino, perms, inode); + if (r) + return r; + ceph_assert(*inode != NULL); + + if (snapid == CEPH_SNAPDIR) { + Inode *tmp = *inode; + + // open the snapdir and put the inode ref + *inode = open_snapdir(tmp); + _ll_forget(tmp, 1); + _ll_get(*inode); + } + return 0; +} + int Client::ll_lookup_inode( struct inodeno_t ino, const UserPerm& perms, Inode **inode) { - ceph_assert(inode != NULL); - std::lock_guard lock(client_lock); - ldout(cct, 3) << "ll_lookup_inode " << ino << dendl; - - if (unmounting) - return -ENOTCONN; - - // Num1: get inode and *inode - int r = _lookup_ino(ino, perms, inode); - if (r) - return r; - - ceph_assert(*inode != NULL); - - if (!(*inode)->dentries.empty()) { - ldout(cct, 8) << __func__ << " dentry already present" << dendl; - return 0; - } - - if ((*inode)->is_root()) { - ldout(cct, 8) << "ino is root, no parent" << dendl; - return 0; - } - - // Num2: Request the parent inode, so that we can look up the name - Inode *parent; - r = _lookup_parent(*inode, perms, &parent); - if (r) { - _ll_forget(*inode, 1); - return r; - } - - ceph_assert(parent != NULL); - - // Num3: Finally, get the name (dentry) of the requested inode - r = _lookup_name(*inode, parent, perms); - if (r) { - // Unexpected error - _ll_forget(parent, 1); - _ll_forget(*inode, 1); - return r; - } - - _ll_forget(parent, 1); - return 0; + vinodeno_t vino(ino, CEPH_NOSNAP); + return ll_lookup_vino(vino, perms, inode); } int Client::ll_lookupx(Inode *parent, const char *name, Inode **out, @@ -11066,6 +11091,9 @@ Inode *Client::ll_get_inode(vinodeno_t vino) if (unmounting) return NULL; + if (is_reserved_vino(vino)) + return NULL; + 
unordered_map::iterator p = inode_map.find(vino); if (p == inode_map.end()) return NULL; @@ -14262,6 +14290,10 @@ int Client::check_pool_perm(Inode *in, int need) if (!cct->_conf->client_check_pool_perm) return 0; + /* Only need to do this for regular files */ + if (!in->is_file()) + return 0; + int64_t pool_id = in->layout.pool_id; std::string pool_ns = in->layout.pool_ns; std::pair perm_key(pool_id, pool_ns); diff --git a/ceph/src/client/Client.h b/ceph/src/client/Client.h index 62b911904..bf3e39f34 100644 --- a/ceph/src/client/Client.h +++ b/ceph/src/client/Client.h @@ -479,6 +479,7 @@ public: int ll_lookup(Inode *parent, const char *name, struct stat *attr, Inode **out, const UserPerm& perms); int ll_lookup_inode(struct inodeno_t ino, const UserPerm& perms, Inode **inode); + int ll_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode); int ll_lookupx(Inode *parent, const char *name, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); @@ -664,7 +665,6 @@ public: void wait_sync_caps(ceph_tid_t want); void queue_cap_snap(Inode *in, SnapContext &old_snapc); void finish_cap_snap(Inode *in, CapSnap &capsnap, int used); - void _flushed_cap_snap(Inode *in, snapid_t seq); void _schedule_invalidate_dentry_callback(Dentry *dn, bool del); void _async_dentry_invalidate(vinodeno_t dirino, vinodeno_t ino, string& name); @@ -1012,6 +1012,7 @@ private: static const VXattr _common_vxattrs[]; + bool is_reserved_vino(vinodeno_t &vino); void fill_dirent(struct dirent *de, const char *name, int type, uint64_t ino, loff_t next_off); @@ -1182,7 +1183,7 @@ private: int _ll_getattr(Inode *in, int caps, const UserPerm& perms); int _lookup_parent(Inode *in, const UserPerm& perms, Inode **parent=NULL); int _lookup_name(Inode *in, Inode *parent, const UserPerm& perms); - int _lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode=NULL); + int _lookup_vino(vinodeno_t ino, const UserPerm& perms, Inode **inode=NULL); bool _ll_forget(Inode *in, uint64_t count); diff --git a/ceph/src/client/Inode.h b/ceph/src/client/Inode.h index b918e6b0f..1b5862885 100644 --- a/ceph/src/client/Inode.h +++ b/ceph/src/client/Inode.h @@ -163,7 +163,7 @@ struct Inode { version_t inline_version; bufferlist inline_data; - bool is_root() const { return ino == MDS_INO_ROOT; } + bool is_root() const { return ino == CEPH_INO_ROOT; } bool is_symlink() const { return (mode & S_IFMT) == S_IFLNK; } bool is_dir() const { return (mode & S_IFMT) == S_IFDIR; } bool is_file() const { return (mode & S_IFMT) == S_IFREG; } diff --git a/ceph/src/cls/CMakeLists.txt b/ceph/src/cls/CMakeLists.txt index d62273d4c..dbe0a1b8a 100644 --- a/ceph/src/cls/CMakeLists.txt +++ b/ceph/src/cls/CMakeLists.txt @@ -86,6 +86,10 @@ if (WITH_RADOSGW) otp/cls_otp_types.cc ) add_library(cls_otp_client STATIC ${cls_otp_client_srcs}) + if (WITH_BOOST_CONTEXT) + target_include_directories(cls_otp_client PRIVATE + $) + endif() endif (WITH_RADOSGW) # cls_refcount diff --git a/ceph/src/cls/rgw/cls_rgw.cc b/ceph/src/cls/rgw/cls_rgw.cc index 741d8c571..73a57312c 100644 --- a/ceph/src/cls/rgw/cls_rgw.cc +++ b/ceph/src/cls/rgw/cls_rgw.cc @@ -35,6 +35,9 @@ static std::string bucket_index_prefixes[] = { "", /* special handling for the o /* this must be the last index */ "9999_",}; +static const std::string BI_PREFIX_END = string(1, BI_PREFIX_CHAR) + + bucket_index_prefixes[BI_BUCKET_LAST_INDEX]; + static bool bi_is_objs_index(const string& s) { return ((unsigned char)s[0] != BI_PREFIX_CHAR); } @@ -2322,29 +2325,29 @@ static int 
rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist * return 0; } -static int list_plain_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max, - list *entries, bool *pmore) +static int list_plain_entries(cls_method_context_t hctx, + const string& filter, + const string& start_after_key, + const string& end_key, + uint32_t max, + list *entries, + bool *end_key_reached, + bool *pmore) { - string filter = name; - string start_key = marker; - - string end_key; // stop listing at bi_log_prefix - bi_log_prefix(end_key); - int count = 0; map keys; - int ret = cls_cxx_map_get_vals(hctx, start_key, filter, max, &keys, pmore); + int ret = cls_cxx_map_get_vals(hctx, start_after_key, filter, max, &keys, + pmore); if (ret < 0) { return ret; } - map::iterator iter; - for (iter = keys.begin(); iter != keys.end(); ++iter) { - if (iter->first >= end_key) { - /* past the end of plain namespace */ - if (pmore) { - *pmore = false; - } + *end_key_reached = false; + + for (auto iter = keys.begin(); iter != keys.end(); ++iter) { + if (!end_key.empty() && iter->first >= end_key) { + *end_key_reached = true; + *pmore = true; return count; } @@ -2363,13 +2366,12 @@ static int list_plain_entries(cls_method_context_t hctx, const string& name, con return -EIO; } - CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str()); + CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, + escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str()); - if (!name.empty() && e.key.name != name) { + if (!filter.empty() && e.key.name != filter) { /* we are skipping the rest of the entries */ - if (pmore) { - *pmore = false; - } + *pmore = false; return count; } @@ -2378,12 +2380,54 @@ static int list_plain_entries(cls_method_context_t hctx, const string& name, con if (count >= (int)max) { return count; } - start_key = entry.idx; } return count; } +static int list_plain_entries(cls_method_context_t hctx, + const string& name, + const string& marker, + uint32_t max, + list *entries, + bool *pmore) { + string start_after_key = marker; + string end_key; + bi_log_prefix(end_key); + int r; + bool end_key_reached; + bool more; + + if (start_after_key < end_key) { + // listing ascii plain namespace + int r = list_plain_entries(hctx, name, start_after_key, end_key, max, + entries, &end_key_reached, &more); + if (r < 0) { + return r; + } + if (r >= (int)max || !end_key_reached || !more) { + if (pmore) { + *pmore = more; + } + return r; + } + start_after_key = BI_PREFIX_END; + max = max - r; + } + + // listing non-ascii plain namespace + r = list_plain_entries(hctx, name, start_after_key, {}, max, entries, + &end_key_reached, &more); + if (r < 0) { + return r; + } + if (pmore) { + *pmore = more; + } + + return r; +} + static int list_instance_entries(cls_method_context_t hctx, const string& name, const string& marker, uint32_t max, list *entries, bool *pmore) { diff --git a/ceph/src/common/AsyncOpTracker.h b/ceph/src/common/AsyncOpTracker.h index d913032aa..dfa913ad4 100644 --- a/ceph/src/common/AsyncOpTracker.h +++ b/ceph/src/common/AsyncOpTracker.h @@ -5,8 +5,7 @@ #define CEPH_ASYNC_OP_TRACKER_H #include "common/ceph_mutex.h" - -struct Context; +#include "include/Context.h" class AsyncOpTracker { public: @@ -27,4 +26,23 @@ private: }; +class C_TrackedOp : public Context { +public: + C_TrackedOp(AsyncOpTracker& async_op_tracker, Context* on_finish) + : m_async_op_tracker(async_op_tracker), m_on_finish(on_finish) { + 
m_async_op_tracker.start_op(); + } + + void finish(int r) override { + if (m_on_finish != nullptr) { + m_on_finish->complete(r); + } + m_async_op_tracker.finish_op(); + } + +private: + AsyncOpTracker& m_async_op_tracker; + Context* m_on_finish; +}; + #endif // CEPH_ASYNC_OP_TRACKER_H diff --git a/ceph/src/common/CMakeLists.txt b/ceph/src/common/CMakeLists.txt index b77159f4d..684819cc0 100644 --- a/ceph/src/common/CMakeLists.txt +++ b/ceph/src/common/CMakeLists.txt @@ -168,7 +168,7 @@ elseif(HAVE_ARMV8_CRC) crc32c_aarch64.c) endif(HAVE_INTEL) -add_library(crc32 ${crc32_srcs}) +add_library(crc32 STATIC ${crc32_srcs}) if(HAVE_ARMV8_CRC) set_target_properties(crc32 PROPERTIES COMPILE_FLAGS "${CMAKE_C_FLAGS} ${ARMV8_CRC_COMPILE_FLAGS}") diff --git a/ceph/src/common/LogClient.cc b/ceph/src/common/LogClient.cc index 982bbe35b..17e20094f 100644 --- a/ceph/src/common/LogClient.cc +++ b/ceph/src/common/LogClient.cc @@ -145,6 +145,14 @@ LogClientTemp::~LogClientTemp() parent.do_log(type, ss); } +void LogChannel::set_log_to_monitors(bool v) +{ + if (log_to_monitors != v) { + parent->reset(); + log_to_monitors = v; + } +} + void LogChannel::update_config(map &log_to_monitors, map &log_to_syslog, map &log_channels, @@ -342,6 +350,15 @@ version_t LogClient::queue(LogEntry &entry) return entry.seq; } +void LogClient::reset() +{ + std::lock_guard l(log_lock); + if (log_queue.size()) { + log_queue.clear(); + } + last_log_sent = last_log; +} + uint64_t LogClient::get_next_seq() { std::lock_guard l(log_lock); diff --git a/ceph/src/common/LogClient.h b/ceph/src/common/LogClient.h index e138beac0..e633620e1 100644 --- a/ceph/src/common/LogClient.h +++ b/ceph/src/common/LogClient.h @@ -134,9 +134,7 @@ public: do_log(CLOG_SEC, s); } - void set_log_to_monitors(bool v) { - log_to_monitors = v; - } + void set_log_to_monitors(bool v); void set_log_to_syslog(bool v) { log_to_syslog = v; } @@ -253,6 +251,7 @@ public: const EntityName& get_myname(); entity_name_t get_myrank(); version_t queue(LogEntry &entry); + void reset(); private: Message *_get_mon_log_message(); diff --git a/ceph/src/common/async/yield_context.h b/ceph/src/common/async/yield_context.h index 436192c02..fdfb3f5b8 100644 --- a/ceph/src/common/async/yield_context.h +++ b/ceph/src/common/async/yield_context.h @@ -22,31 +22,28 @@ #ifndef HAVE_BOOST_CONTEXT -// hide the dependencies on boost::context and boost::coroutines -namespace boost::asio { +// hide the dependency on boost::context +namespace spawn { struct yield_context; } #else // HAVE_BOOST_CONTEXT -#ifndef BOOST_COROUTINES_NO_DEPRECATION_WARNING -#define BOOST_COROUTINES_NO_DEPRECATION_WARNING -#endif -#include +#include #endif // HAVE_BOOST_CONTEXT -/// optional-like wrapper for a boost::asio::yield_context and its associated +/// optional-like wrapper for a spawn::yield_context and its associated /// boost::asio::io_context. 
diff --git a/ceph/src/common/async/yield_context.h b/ceph/src/common/async/yield_context.h
index 436192c02..fdfb3f5b8 100644
--- a/ceph/src/common/async/yield_context.h
+++ b/ceph/src/common/async/yield_context.h
@@ -22,31 +22,28 @@
 
 #ifndef HAVE_BOOST_CONTEXT
 
-// hide the dependencies on boost::context and boost::coroutines
-namespace boost::asio {
+// hide the dependency on boost::context
+namespace spawn {
 struct yield_context;
 }
 
 #else // HAVE_BOOST_CONTEXT
 
-#ifndef BOOST_COROUTINES_NO_DEPRECATION_WARNING
-#define BOOST_COROUTINES_NO_DEPRECATION_WARNING
-#endif
-#include <boost/asio/spawn.hpp>
+#include <spawn/spawn.hpp>
 
 #endif // HAVE_BOOST_CONTEXT
 
-/// optional-like wrapper for a boost::asio::yield_context and its associated
+/// optional-like wrapper for a spawn::yield_context and its associated
 /// boost::asio::io_context. operations that take an optional_yield argument
 /// will, when passed a non-empty yield context, suspend this coroutine instead
 /// of blocking the thread of execution
 class optional_yield {
   boost::asio::io_context *c = nullptr;
-  boost::asio::yield_context *y = nullptr;
+  spawn::yield_context *y = nullptr;
  public:
   /// construct with a valid io and yield_context
   explicit optional_yield(boost::asio::io_context& c,
-                          boost::asio::yield_context& y) noexcept
+                          spawn::yield_context& y) noexcept
     : c(&c), y(&y) {}
 
   /// type tag to construct an empty object
@@ -60,7 +57,7 @@ class optional_yield {
   boost::asio::io_context& get_io_context() const noexcept { return *c; }
 
   /// return a reference to the yield_context. only valid if non-empty
-  boost::asio::yield_context& get_yield_context() const noexcept { return *y; }
+  spawn::yield_context& get_yield_context() const noexcept { return *y; }
 };
 
 // type tag object to construct an empty optional_yield
diff --git a/ceph/src/common/buffer.cc b/ceph/src/common/buffer.cc
index 8caaae907..4854e9b6f 100644
--- a/ceph/src/common/buffer.cc
+++ b/ceph/src/common/buffer.cc
@@ -108,8 +108,8 @@ static ceph::spinlock debug_lock;
     static raw_combined *create(unsigned len, unsigned align,
                                 int mempool = mempool::mempool_buffer_anon) {
-      if (!align)
-        align = sizeof(size_t);
+      // posix_memalign() requires a multiple of sizeof(void *)
+      align = std::max<unsigned>(align, sizeof(void *));
       size_t rawlen = round_up_to(sizeof(buffer::raw_combined),
                                   alignof(buffer::raw_combined));
       size_t datalen = round_up_to(len, alignof(buffer::raw_combined));
@@ -169,8 +169,8 @@ static ceph::spinlock debug_lock;
     MEMPOOL_CLASS_HELPERS();
 
     raw_posix_aligned(unsigned l, unsigned _align) : raw(l) {
-      align = _align;
-      ceph_assert((align >= sizeof(void *)) && (align & (align - 1)) == 0);
+      // posix_memalign() requires a multiple of sizeof(void *)
+      align = std::max<unsigned>(_align, sizeof(void *));
 #ifdef DARWIN
       data = (char *) valloc(len);
 #else
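Both buffer.cc hunks above replace weaker alignment handling with an unconditional clamp, because posix_memalign() rejects any alignment that is not a power-of-two multiple of sizeof(void *). A short standalone demonstration of the constraint and of the clamp (Linux/POSIX assumed):

    // memalign_demo.cc
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <algorithm>

    int main() {
      void* p = nullptr;

      // alignment 2 is a power of two but below sizeof(void *): EINVAL
      int r = posix_memalign(&p, 2, 64);
      printf("align=2  -> %d (EINVAL=%d)\n", r, EINVAL);

      // clamping as the patch does makes the request valid
      size_t align = std::max<size_t>(2, sizeof(void *));
      r = posix_memalign(&p, align, 64);
      printf("align=%zu -> %d\n", align, r);
      if (r == 0) free(p);
    }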
diff --git a/ceph/src/common/config.cc b/ceph/src/common/config.cc
index 863525efe..5e19e8c06 100644
--- a/ceph/src/common/config.cc
+++ b/ceph/src/common/config.cc
@@ -651,9 +651,6 @@ int md_config_t::parse_argv(ConfigValues& values,
     else if (ceph_argparse_flag(args, i, "--no-mon-config", (char*)NULL)) {
       values.no_mon_config = true;
     }
-    else if (ceph_argparse_flag(args, i, "--log-early", (char*)NULL)) {
-      values.log_early = true;
-    }
     else if (ceph_argparse_flag(args, i, "--mon-config", (char*)NULL)) {
       values.no_mon_config = false;
     }
diff --git a/ceph/src/common/config_values.h b/ceph/src/common/config_values.h
index ab52060e4..fdfff2b16 100644
--- a/ceph/src/common/config_values.h
+++ b/ceph/src/common/config_values.h
@@ -28,7 +28,6 @@ public:
   string cluster;
   ceph::logging::SubsystemMap subsys;
   bool no_mon_config = false;
-  bool log_early = false;
   // Set of configuration options that have changed since the last
   // apply_changes
   using changed_set_t = std::set<std::string>;
diff --git a/ceph/src/common/ipaddr.cc b/ceph/src/common/ipaddr.cc
index 0abf7f20c..bd11cbfc1 100644
--- a/ceph/src/common/ipaddr.cc
+++ b/ceph/src/common/ipaddr.cc
@@ -3,7 +3,6 @@
 #include
 #include
 #include
-#include
 #if defined(__FreeBSD__)
 #include
 #include
@@ -29,54 +28,23 @@ void netmask_ipv4(const struct in_addr *addr,
   out->s_addr = addr->s_addr & mask;
 }
 
-
-static bool match_numa_node(const string& if_name, int numa_node)
+bool matches_ipv4_in_subnet(const struct ifaddrs& addrs,
+                            const struct sockaddr_in* net,
+                            unsigned int prefix_len)
 {
-#ifdef WITH_SEASTAR
-  return true;
-#else
-  int if_node = -1;
-  int r = get_iface_numa_node(if_name, &if_node);
-  if (r < 0) {
+  if (addrs.ifa_addr == nullptr)
     return false;
-  }
-  return if_node == numa_node;
-#endif
-}
-
-const struct ifaddrs *find_ipv4_in_subnet(const struct ifaddrs *addrs,
-                                          const struct sockaddr_in *net,
-                                          unsigned int prefix_len,
-                                          int numa_node) {
-  struct in_addr want, temp;
+  if (addrs.ifa_addr->sa_family != net->sin_family)
+    return false;
+
+  struct in_addr want;
   netmask_ipv4(&net->sin_addr, prefix_len, &want);
-  for (; addrs != NULL; addrs = addrs->ifa_next) {
-
-    if (addrs->ifa_addr == NULL)
-      continue;
-
-    if (strcmp(addrs->ifa_name, "lo") == 0 || boost::starts_with(addrs->ifa_name, "lo:"))
-      continue;
-
-    if (numa_node >= 0 && !match_numa_node(addrs->ifa_name, numa_node))
-      continue;
-
-    if (addrs->ifa_addr->sa_family != net->sin_family)
-      continue;
-
-    struct in_addr *cur = &((struct sockaddr_in*)addrs->ifa_addr)->sin_addr;
-    netmask_ipv4(cur, prefix_len, &temp);
-
-    if (temp.s_addr == want.s_addr) {
-      return addrs;
-    }
-  }
-
-  return NULL;
+  struct in_addr *cur = &((struct sockaddr_in*)addrs.ifa_addr)->sin_addr;
+  struct in_addr temp;
+  netmask_ipv4(cur, prefix_len, &temp);
+  return temp.s_addr == want.s_addr;
 }
 
-
 void netmask_ipv6(const struct in6_addr *addr,
                   unsigned int prefix_len,
                   struct in6_addr *out) {
@@ -90,59 +58,25 @@ void netmask_ipv6(const struct in6_addr *addr,
   memset(out->s6_addr+prefix_len/8+1, 0, 16-prefix_len/8-1);
 }
 
+bool matches_ipv6_in_subnet(const struct ifaddrs& addrs,
+                            const struct sockaddr_in6* net,
+                            unsigned int prefix_len)
+{
+  if (addrs.ifa_addr == nullptr)
+    return false;
 
-const struct ifaddrs *find_ipv6_in_subnet(const struct ifaddrs *addrs,
-                                          const struct sockaddr_in6 *net,
-                                          unsigned int prefix_len,
-                                          int numa_node) {
-  struct in6_addr want, temp;
-
+  if (addrs.ifa_addr->sa_family != net->sin6_family)
+    return false;
+
+  struct in6_addr want;
   netmask_ipv6(&net->sin6_addr, prefix_len, &want);
-  for (; addrs != NULL; addrs = addrs->ifa_next) {
-
-    if (addrs->ifa_addr == NULL)
-      continue;
-
-    if (strcmp(addrs->ifa_name, "lo") == 0 || boost::starts_with(addrs->ifa_name, "lo:"))
-      continue;
-
-    if (numa_node >= 0 && !match_numa_node(addrs->ifa_name, numa_node))
-      continue;
-
-    if (addrs->ifa_addr->sa_family != net->sin6_family)
-      continue;
-
-    struct in6_addr *cur = &((struct sockaddr_in6*)addrs->ifa_addr)->sin6_addr;
-    if (IN6_IS_ADDR_LINKLOCAL(cur))
-      continue;
-    netmask_ipv6(cur, prefix_len, &temp);
-
-    if (IN6_ARE_ADDR_EQUAL(&temp, &want))
-      return addrs;
-  }
-
-  return NULL;
+  struct in6_addr temp;
+  struct in6_addr *cur = &((struct sockaddr_in6*)addrs.ifa_addr)->sin6_addr;
+  if (IN6_IS_ADDR_LINKLOCAL(cur))
+    return false;
+  netmask_ipv6(cur, prefix_len, &temp);
+  return IN6_ARE_ADDR_EQUAL(&temp, &want);
 }
 
-
-const struct ifaddrs *find_ip_in_subnet(const struct ifaddrs *addrs,
-                                        const struct sockaddr *net,
-                                        unsigned int prefix_len,
-                                        int numa_node) {
-  switch (net->sa_family) {
-  case AF_INET:
-    return find_ipv4_in_subnet(addrs, (struct sockaddr_in*)net, prefix_len,
-                               numa_node);
-
-  case AF_INET6:
-    return find_ipv6_in_subnet(addrs, (struct sockaddr_in6*)net, prefix_len,
-                               numa_node);
-  }
-
-  return NULL;
-}
-
-
 bool parse_network(const char *s, struct sockaddr_storage *network, unsigned int *prefix_len) {
   char *slash = strchr((char*)s, '/');
   if (!slash) {
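With this refactor the matchers test a single ifaddrs entry, and the iteration over the interface list (plus the loopback and NUMA filtering that used to live here) moves to the caller in pick_address.cc. A sketch of caller-side usage against the new interface (it must link against common/ipaddr.cc; the subnet and prefix length are illustrative):

    // match_demo.cc: walk getifaddrs() output with the per-entry matcher
    #include <arpa/inet.h>
    #include <ifaddrs.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    // new per-entry interface from common/ipaddr.cc
    bool matches_ipv4_in_subnet(const struct ifaddrs& addrs,
                                const struct sockaddr_in* net,
                                unsigned int prefix_len);

    int main() {
      struct ifaddrs* ifa = nullptr;
      if (getifaddrs(&ifa) < 0)
        return 1;

      struct sockaddr_in net = {};
      net.sin_family = AF_INET;
      inet_pton(AF_INET, "192.168.0.0", &net.sin_addr);  // assumed subnet

      for (struct ifaddrs* p = ifa; p != nullptr; p = p->ifa_next) {
        if (strcmp(p->ifa_name, "lo") == 0)  // loopback filtering is caller-side now
          continue;
        if (matches_ipv4_in_subnet(*p, &net, 16))
          printf("%s is in 192.168.0.0/16\n", p->ifa_name);
      }
      freeifaddrs(ifa);
    }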
diff --git a/ceph/src/common/legacy_config_opts.h b/ceph/src/common/legacy_config_opts.h
index 1ff14bd61..ad98c90e7 100644
--- a/ceph/src/common/legacy_config_opts.h
+++ b/ceph/src/common/legacy_config_opts.h
@@ -801,6 +801,7 @@ OPTION(osd_op_history_slow_op_threshold, OPT_DOUBLE) // track the op if over thi
 OPTION(osd_target_transaction_size, OPT_INT)     // to adjust various transactions that batch smaller items
 OPTION(osd_failsafe_full_ratio, OPT_FLOAT) // what % full makes an OSD "full" (failsafe)
 OPTION(osd_fast_shutdown, OPT_BOOL)
+OPTION(osd_fast_shutdown_notify_mon, OPT_BOOL) // tell mon the OSD is shutting down on osd_fast_shutdown
 OPTION(osd_fast_fail_on_connection_refused, OPT_BOOL) // immediately mark OSDs as down once they refuse to accept connections
 OPTION(osd_pg_object_context_cache_count, OPT_INT)
diff --git a/ceph/src/common/options.cc b/ceph/src/common/options.cc
index 768d6505d..41cd87c57 100644
--- a/ceph/src/common/options.cc
+++ b/ceph/src/common/options.cc
@@ -2198,16 +2198,23 @@ std::vector