Mirror of https://git.proxmox.com/git/ceph.git (synced 2025-04-28 15:01:36 +00:00)

commit 42a1f1800a (parent d1416e76a8)

    import ceph pacific 16.2.13 source

    Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10.2)
 # remove cmake/modules/FindPython* once 3.12 is required
 
 project(ceph
-  VERSION 16.2.12
+  VERSION 16.2.13
   LANGUAGES CXX C ASM)
 
 foreach(policy
@@ -32,11 +32,18 @@
   in certain recovery scenarios, e.g., monitor database lost and rebuilt, and
   the restored file system is expected to have the same ID as before.
 
+>=16.2.12
+---------
+
 * CEPHFS: Rename the `mds_max_retries_on_remount_failure` option to
   `client_max_retries_on_remount_failure` and move it from mds.yaml.in to
   mds-client.yaml.in because this option was only used by MDS client from its
   birth.
 
+* `ceph mgr dump` command now outputs `last_failure_osd_epoch` and
+  `active_clients` fields at the top level. Previously, these fields were
+  output under `always_on_modules` field.
+
 >=16.2.11
 --------
 
@@ -135,7 +135,7 @@
 # main package definition
 #################################################################################
 Name: ceph
-Version: 16.2.12
+Version: 16.2.13
 Release: 0%{?dist}
 %if 0%{?fedora} || 0%{?rhel}
 Epoch: 2
@@ -151,7 +151,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-
 Group: System/Filesystems
 %endif
 URL: http://ceph.com/
-Source0: %{?_remote_tarball_prefix}ceph-16.2.12.tar.bz2
+Source0: %{?_remote_tarball_prefix}ceph-16.2.13.tar.bz2
 %if 0%{?suse_version}
 # _insert_obs_source_lines_here
 ExclusiveArch: x86_64 aarch64 ppc64le s390x
@@ -1208,7 +1208,7 @@ This package provides Ceph default alerts for Prometheus.
 # common
 #################################################################################
 %prep
-%autosetup -p1 -n ceph-16.2.12
+%autosetup -p1 -n ceph-16.2.13
 
 %build
 # Disable lto on systems that do not support symver attribute
@@ -1,7 +1,13 @@
-ceph (16.2.12-1focal) focal; urgency=medium
+ceph (16.2.13-1focal) focal; urgency=medium
 
 
- -- Jenkins Build Slave User <jenkins-build@braggi17.front.sepia.ceph.com>  Thu, 13 Apr 2023 22:05:57 +0000
+ -- Jenkins Build Slave User <jenkins-build@braggi17.front.sepia.ceph.com>  Mon, 08 May 2023 20:49:59 +0000
+
+ceph (16.2.13-1) stable; urgency=medium
+
+  * New upstream release
+
+ -- Ceph Release Team <ceph-maintainers@ceph.io>  Mon, 08 May 2023 20:39:33 +0000
 
 ceph (16.2.12-1) stable; urgency=medium
 
@@ -199,6 +199,8 @@ For details on configuring monitors, see the `Monitor Config Reference`_.
 
 .. index:: architecture; high availability authentication
 
+.. _arch_high_availability_authentication:
+
 High Availability Authentication
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -2,38 +2,44 @@
 CephFS Mirroring
 ================
 
-CephFS supports asynchronous replication of snapshots to a remote CephFS file system via
-`cephfs-mirror` tool. Snapshots are synchronized by mirroring snapshot data followed by
-creating a snapshot with the same name (for a given directory on the remote file system) as
-the snapshot being synchronized.
+CephFS supports asynchronous replication of snapshots to a remote CephFS file
+system via `cephfs-mirror` tool. Snapshots are synchronized by mirroring
+snapshot data followed by creating a snapshot with the same name (for a given
+directory on the remote file system) as the snapshot being synchronized.
 
 Requirements
 ------------
 
-The primary (local) and secondary (remote) Ceph clusters version should be Pacific or later.
+The primary (local) and secondary (remote) Ceph clusters version should be
+Pacific or later.
 
 Key Idea
 --------
 
-For a given snapshot pair in a directory, `cephfs-mirror` daemon will rely on readdir diff
-to identify changes in a directory tree. The diffs are applied to directory in the remote
-file system thereby only synchronizing files that have changed between two snapshots.
+For a given snapshot pair in a directory, `cephfs-mirror` daemon will rely on
+readdir diff to identify changes in a directory tree. The diffs are applied to
+directory in the remote file system thereby only synchronizing files that have
+changed between two snapshots.
 
 This feature is tracked here: https://tracker.ceph.com/issues/47034.
 
-Currently, snapshot data is synchronized by bulk copying to the remote filesystem.
+Currently, snapshot data is synchronized by bulk copying to the remote
+filesystem.
 
-.. note:: Synchronizing hardlinks is not supported -- hardlinked files get synchronized
-   as separate files.
+.. note:: Synchronizing hardlinks is not supported -- hardlinked files get
+   synchronized as separate files.
 
 Creating Users
 --------------
 
-Start by creating a user (on the primary/local cluster) for the mirror daemon. This user
-requires write capability on the metadata pool to create RADOS objects (index objects)
-for watch/notify operation and read capability on the data pool(s).
+Start by creating a user (on the primary/local cluster) for the mirror daemon.
+This user requires write capability on the metadata pool to create RADOS
+objects (index objects) for watch/notify operation and read capability on the
+data pool(s).
 
-    $ ceph auth get-or-create client.mirror mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'
+.. prompt:: bash $
+
+   ceph auth get-or-create client.mirror mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'
 
 Create a user for each file system peer (on the secondary/remote cluster). This user needs
 to have full capabilities on the MDS (to take snapshots) and the OSDs::
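The hunk above ends at the peer-user paragraph; as a hedged illustration of how the rewrapped instructions fit together (the file system name ``cephfs``, the cluster name ``site-a``, and the exact daemon invocation are assumptions for this sketch, not taken from the diff), enabling snapshot mirroring and starting the daemon with the ``client.mirror`` user created above might look like this::

    # enable the mirroring manager module and snapshot mirroring for one file system
    ceph mgr module enable mirroring
    ceph fs snapshot mirror enable cephfs

    # run the mirror daemon in the foreground with the client.mirror credentials created above
    cephfs-mirror --id mirror --cluster site-a -f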
@@ -188,9 +188,12 @@
 		Storage Clusters receive data from :term:`Ceph Client`\s.
 
 	CephX
-		The Ceph authentication protocol. CephX operates like Kerberos,
-		but it has no single point of failure. See the :ref:`CephX
-		Configuration Reference<rados-cephx-config-ref>`.
+		The Ceph authentication protocol. CephX authenticates users and
+		daemons. CephX operates like Kerberos, but it has no single
+		point of failure. See the :ref:`High-availability
+		Authentication section<arch_high_availability_authentication>`
+		of the Architecture document and the :ref:`CephX Configuration
+		Reference<rados-cephx-config-ref>`.
 
 	Client
 		A client is any program external to Ceph that uses a Ceph
@@ -249,6 +252,9 @@
 		Any single machine or server in a Ceph Cluster. See :term:`Ceph
 		Node`.
 
+	Hybrid OSD
+		Refers to an OSD that has both HDD and SSD drives.
+
 	LVM tags
 		Extensible metadata for LVM volumes and groups. It is used to
 		store Ceph-specific information about devices and its
@@ -303,6 +309,20 @@
 		state of a multi-site configuration. When the period is updated,
 		the "epoch" is said thereby to have been changed.
 
+	Placement Groups (PGs)
+		Placement groups (PGs) are subsets of each logical Ceph pool.
+		Placement groups perform the function of placing objects (as a
+		group) into OSDs. Ceph manages data internally at
+		placement-group granularity: this scales better than would
+		managing individual (and therefore more numerous) RADOS
+		objects. A cluster that has a larger number of placement groups
+		(for example, 100 per OSD) is better balanced than an otherwise
+		identical cluster with a smaller number of placement groups.
+
+		Ceph's internal RADOS objects are each mapped to a specific
+		placement group, and each placement group belongs to exactly
+		one Ceph pool.
+
 	:ref:`Pool<rados_pools>`
 		A pool is a logical partition used to store objects.
 
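The new glossary entry describes placement groups in general terms; as a rough, hedged illustration (the pool name and PG count below are invented for this example), the PG count is chosen when a pool is created and can be inspected afterwards::

    # create a replicated pool with 128 placement groups (example values)
    ceph osd pool create mypool 128

    # confirm the number of PGs in the pool
    ceph osd pool get mypool pg_num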
@@ -64,7 +64,7 @@ in conjunction with multiple Prometheus instances, overload the manager and lead
 to unresponsive or crashing Ceph manager instances. Hence, the cache is enabled
 by default. This means that there is a possibility that the cache becomes
 stale. The cache is considered stale when the time to fetch the metrics from
-Ceph exceeds the configured :confval:``mgr/prometheus/scrape_interval``.
+Ceph exceeds the configured ``mgr/prometheus/scrape_interval``.
 
 If that is the case, **a warning will be logged** and the module will either
 
@@ -374,13 +374,6 @@ Most health mutes disappear if the unhealthy condition that triggered the health
 For example, suppose that there is one OSD down and the health check is muted. In that case, if
 one or more additional OSDs go down, then the health mute disappears. This behavior occurs in any health check with a threshold value.
 
-Detecting Configuration Issues
-==============================
-
-Although Ceph continuously monitors itself, some configuration issues can be
-detected only with an external tool called ``ceph-medic``.
->>>>>>> 41684ebd33b (doc/rados: edit ops/monitoring.rst (2 of 3))
-
 Checking a Cluster's Usage Stats
 ================================
 
@@ -404,48 +397,49 @@ The output of ``ceph df`` resembles the following::
     cephfs.a.meta 2 32 6.8 KiB 6.8 KiB 0 B 22 96 KiB 96 KiB 0 B 0 297 GiB N/A N/A 22 0 B 0 B
     cephfs.a.data 3 32 0 B 0 B 0 B 0 0 B 0 B 0 B 0 99 GiB N/A N/A 0 0 B 0 B
     test 4 32 22 MiB 22 MiB 50 KiB 248 19 MiB 19 MiB 50 KiB 0 297 GiB N/A N/A 248 0 B 0 B
 
-- **CLASS:** for example, "ssd" or "hdd"
+- **CLASS:** For example, "ssd" or "hdd".
 - **SIZE:** The amount of storage capacity managed by the cluster.
 - **AVAIL:** The amount of free space available in the cluster.
 - **USED:** The amount of raw storage consumed by user data (excluding
-  BlueStore's database)
+  BlueStore's database).
 - **RAW USED:** The amount of raw storage consumed by user data, internal
-  overhead, or reserved capacity.
-- **%RAW USED:** The percentage of raw storage used. Use this number in
-  conjunction with the ``full ratio`` and ``near full ratio`` to ensure that
-  you are not reaching your cluster's capacity. See `Storage Capacity`_ for
-  additional details.
+  overhead, and reserved capacity.
+- **%RAW USED:** The percentage of raw storage used. Watch this number in
+  conjunction with ``full ratio`` and ``near full ratio`` to be forewarned when
+  your cluster approaches the fullness thresholds. See `Storage Capacity`_.
 
 
 **POOLS:**
 
-The **POOLS** section of the output provides a list of pools and the notional
-usage of each pool. The output from this section **DOES NOT** reflect replicas,
-clones or snapshots. For example, if you store an object with 1MB of data, the
-notional usage will be 1MB, but the actual usage may be 2MB or more depending
-on the number of replicas, clones and snapshots.
+The POOLS section of the output provides a list of pools and the *notional*
+usage of each pool. This section of the output **DOES NOT** reflect replicas,
+clones, or snapshots. For example, if you store an object with 1MB of data,
+then the notional usage will be 1MB, but the actual usage might be 2MB or more
+depending on the number of replicas, clones, and snapshots.
 
-- **ID:** The number of the node within the pool.
-- **STORED:** actual amount of data user/Ceph has stored in a pool. This is
-  similar to the USED column in earlier versions of Ceph but the calculations
-  (for BlueStore!) are more precise (gaps are properly handled).
+- **ID:** The number of the specific node within the pool.
+- **STORED:** The actual amount of data that the user has stored in a pool.
+  This is similar to the USED column in earlier versions of Ceph, but the
+  calculations (for BlueStore!) are more precise (in that gaps are properly
+  handled).
 
-  - **(DATA):** usage for RBD (RADOS Block Device), CephFS file data, and RGW
+  - **(DATA):** Usage for RBD (RADOS Block Device), CephFS file data, and RGW
     (RADOS Gateway) object data.
-  - **(OMAP):** key-value pairs. Used primarily by CephFS and RGW (RADOS
+  - **(OMAP):** Key-value pairs. Used primarily by CephFS and RGW (RADOS
     Gateway) for metadata storage.
 
-- **OBJECTS:** The notional number of objects stored per pool. "Notional" is
-  defined above in the paragraph immediately under "POOLS".
-- **USED:** The space allocated for a pool over all OSDs. This includes
-  replication, allocation granularity, and erasure-coding overhead. Compression
-  savings and object content gaps are also taken into account. BlueStore's
-  database is not included in this amount.
+- **OBJECTS:** The notional number of objects stored per pool (that is, the
+  number of objects other than replicas, clones, or snapshots).
+- **USED:** The space allocated for a pool over all OSDs. This includes space
+  for replication, space for allocation granularity, and space for the overhead
+  associated with erasure-coding. Compression savings and object-content gaps
+  are also taken into account. However, BlueStore's database is not included in
+  the amount reported under USED.
 
-  - **(DATA):** object usage for RBD (RADOS Block Device), CephFS file data, and RGW
-    (RADOS Gateway) object data.
-  - **(OMAP):** object key-value pairs. Used primarily by CephFS and RGW (RADOS
+  - **(DATA):** Object usage for RBD (RADOS Block Device), CephFS file data,
+    and RGW (RADOS Gateway) object data.
+  - **(OMAP):** Object key-value pairs. Used primarily by CephFS and RGW (RADOS
     Gateway) for metadata storage.
 
 - **%USED:** The notional percentage of storage used per pool.
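For readers following the column descriptions above, the per-pool breakdown (including the DATA and OMAP sub-rows and the quota and compression columns covered in the next hunk) comes from a more verbose form of the same command; this is a hedged aside rather than part of the patch::

    # cluster-wide summary, as shown above
    ceph df

    # per-pool detail, including QUOTA, DIRTY, USED COMPR and UNDER COMPR
    ceph df detail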
@@ -454,50 +448,50 @@ on the number of replicas, clones and snapshots.
 - **QUOTA OBJECTS:** The number of quota objects.
 - **QUOTA BYTES:** The number of bytes in the quota objects.
 - **DIRTY:** The number of objects in the cache pool that have been written to
-  the cache pool but have not been flushed yet to the base pool. This field is
-  only available when cache tiering is in use.
-- **USED COMPR:** amount of space allocated for compressed data (i.e. this
-  includes comrpessed data plus all the allocation, replication and erasure
-  coding overhead).
-- **UNDER COMPR:** amount of data passed through compression (summed over all
-  replicas) and beneficial enough to be stored in a compressed form.
+  the cache pool but have not yet been flushed to the base pool. This field is
+  available only when cache tiering is in use.
+- **USED COMPR:** The amount of space allocated for compressed data. This
+  includes compressed data in addition to all of the space required for
+  replication, allocation granularity, and erasure- coding overhead.
+- **UNDER COMPR:** The amount of data that has passed through compression
+  (summed over all replicas) and that is worth storing in a compressed form.
 
-.. note:: The numbers in the POOLS section are notional. They are not
-   inclusive of the number of replicas, snapshots or clones. As a result, the
-   sum of the USED and %USED amounts will not add up to the USED and %USED
-   amounts in the RAW section of the output.
-
-.. note:: The MAX AVAIL value is a complicated function of the replication
-   or erasure code used, the CRUSH rule that maps storage to devices, the
-   utilization of those devices, and the configured ``mon_osd_full_ratio``.
+.. note:: The numbers in the POOLS section are notional. They do not include
+   the number of replicas, clones, or snapshots. As a result, the sum of the
+   USED and %USED amounts in the POOLS section of the output will not be equal
+   to the sum of the USED and %USED amounts in the RAW section of the output.
+
+.. note:: The MAX AVAIL value is a complicated function of the replication or
+   the kind of erasure coding used, the CRUSH rule that maps storage to
+   devices, the utilization of those devices, and the configured
+   ``mon_osd_full_ratio`` setting.
 
 
 Checking OSD Status
 ===================
 
-You can check OSDs to ensure they are ``up`` and ``in`` by executing the
+To check if OSDs are ``up`` and ``in``, run the
 following command:
 
 .. prompt:: bash #
 
    ceph osd stat
 
-Or:
+Alternatively, you can run the following command:
 
 .. prompt:: bash #
 
    ceph osd dump
 
-You can also check view OSDs according to their position in the CRUSH map by
-using the folloiwng command:
+To view OSDs according to their position in the CRUSH map, run the following
+command:
 
 .. prompt:: bash #
 
    ceph osd tree
 
-Ceph will print out a CRUSH tree with a host, its OSDs, whether they are up
-and their weight:
+To print out a CRUSH tree that displays a host, its OSDs, whether the OSDs are
+``up``, and the weight of the OSDs, run the following command:
 
 .. code-block:: bash
 
@@ -509,88 +503,90 @@ and their weight:
     1 ssd 1.00000 osd.1 up 1.00000 1.00000
     2 ssd 1.00000 osd.2 up 1.00000 1.00000
 
-For a detailed discussion, refer to `Monitoring OSDs and Placement Groups`_.
+See `Monitoring OSDs and Placement Groups`_.
 
 Checking Monitor Status
 =======================
 
-If your cluster has multiple monitors (likely), you should check the monitor
-quorum status after you start the cluster and before reading and/or writing data. A
-quorum must be present when multiple monitors are running. You should also check
-monitor status periodically to ensure that they are running.
+If your cluster has multiple monitors, then you need to perform certain
+"monitor status" checks. After starting the cluster and before reading or
+writing data, you should check quorum status. A quorum must be present when
+multiple monitors are running to ensure proper functioning of your Ceph
+cluster. Check monitor status regularly in order to ensure that all of the
+monitors are running.
 
-To see display the monitor map, execute the following:
+To display the monitor map, run the following command:
 
 .. prompt:: bash $
 
    ceph mon stat
 
-Or:
+Alternatively, you can run the following command:
 
 .. prompt:: bash $
 
   ceph mon dump
 
-To check the quorum status for the monitor cluster, execute the following:
+To check the quorum status for the monitor cluster, run the following command:
 
 .. prompt:: bash $
 
   ceph quorum_status
 
-Ceph will return the quorum status. For example, a Ceph cluster consisting of
-three monitors may return the following:
+Ceph returns the quorum status. For example, a Ceph cluster that consists of
+three monitors might return the following:
 
 .. code-block:: javascript
 
   { "election_epoch": 10,
    "quorum": [
          0,
         1,
         2],
   "quorum_names": [
        "a",
        "b",
        "c"],
   "quorum_leader_name": "a",
   "monmap": { "epoch": 1,
      "fsid": "444b489c-4f16-4b75-83f0-cb8097468898",
      "modified": "2011-12-12 13:28:27.505520",
      "created": "2011-12-12 13:28:27.505520",
      "features": {"persistent": [
                     "kraken",
                     "luminous",
                     "mimic"],
        "optional": []
      },
      "mons": [
        { "rank": 0,
          "name": "a",
          "addr": "127.0.0.1:6789/0",
          "public_addr": "127.0.0.1:6789/0"},
        { "rank": 1,
          "name": "b",
          "addr": "127.0.0.1:6790/0",
          "public_addr": "127.0.0.1:6790/0"},
        { "rank": 2,
          "name": "c",
          "addr": "127.0.0.1:6791/0",
          "public_addr": "127.0.0.1:6791/0"}
        ]
   }
  }
 
 Checking MDS Status
 ===================
 
 Metadata servers provide metadata services for CephFS. Metadata servers have
-two sets of states: ``up | down`` and ``active | inactive``. To ensure your
-metadata servers are ``up`` and ``active``, execute the following:
+two sets of states: ``up | down`` and ``active | inactive``. To check if your
+metadata servers are ``up`` and ``active``, run the following command:
 
 .. prompt:: bash $
 
   ceph mds stat
 
-To display details of the metadata cluster, execute the following:
+To display details of the metadata servers, run the following command:
 
 .. prompt:: bash $
 
@@ -600,9 +596,9 @@ To display details of the metadata cluster, execute the following:
 Checking Placement Group States
 ===============================
 
-Placement groups map objects to OSDs. When you monitor your
-placement groups, you will want them to be ``active`` and ``clean``.
-For a detailed discussion, refer to `Monitoring OSDs and Placement Groups`_.
+Placement groups (PGs) map objects to OSDs. PGs are monitored in order to
+ensure that they are ``active`` and ``clean``. See `Monitoring OSDs and
+Placement Groups`_.
 
 .. _Monitoring OSDs and Placement Groups: ../monitoring-osd-pg
 
|
|||||||
Using the Admin Socket
|
Using the Admin Socket
|
||||||
======================
|
======================
|
||||||
|
|
||||||
The Ceph admin socket allows you to query a daemon via a socket interface.
|
The Ceph admin socket allows you to query a daemon via a socket interface. By
|
||||||
By default, Ceph sockets reside under ``/var/run/ceph``. To access a daemon
|
default, Ceph sockets reside under ``/var/run/ceph``. To access a daemon via
|
||||||
via the admin socket, login to the host running the daemon and use the
|
the admin socket, log in to the host that is running the daemon and run one of
|
||||||
following command:
|
the two following commands:
|
||||||
|
|
||||||
.. prompt:: bash $
|
.. prompt:: bash $
|
||||||
|
|
||||||
ceph daemon {daemon-name}
|
ceph daemon {daemon-name}
|
||||||
ceph daemon {path-to-socket-file}
|
ceph daemon {path-to-socket-file}
|
||||||
|
|
||||||
For example, the following are equivalent:
|
For example, the following commands are equivalent to each other:
|
||||||
|
|
||||||
.. prompt:: bash $
|
.. prompt:: bash $
|
||||||
|
|
||||||
ceph daemon osd.0 foo
|
ceph daemon osd.0 foo
|
||||||
ceph daemon /var/run/ceph/ceph-osd.0.asok foo
|
ceph daemon /var/run/ceph/ceph-osd.0.asok foo
|
||||||
|
|
||||||
To view the available admin socket commands, execute the following command:
|
To view the available admin-socket commands, run the following command:
|
||||||
|
|
||||||
.. prompt:: bash $
|
.. prompt:: bash $
|
||||||
|
|
||||||
ceph daemon {daemon-name} help
|
ceph daemon {daemon-name} help
|
||||||
|
|
||||||
The admin socket command enables you to show and set your configuration at
|
Admin-socket commands enable you to view and set your configuration at runtime.
|
||||||
runtime. See `Viewing a Configuration at Runtime`_ for details.
|
For more on viewing your configuration, see `Viewing a Configuration at
|
||||||
|
Runtime`_. There are two methods of setting configuration value at runtime: (1)
|
||||||
Additionally, you can set configuration values at runtime directly (i.e., the
|
using the admin socket, which bypasses the monitor and requires a direct login
|
||||||
admin socket bypasses the monitor, unlike ``ceph tell {daemon-type}.{id}
|
to the host in question, and (2) using the ``ceph tell {daemon-type}.{id}
|
||||||
config set``, which relies on the monitor but doesn't require you to login
|
config set`` command, which relies on the monitor and does not require a direct
|
||||||
directly to the host in question ).
|
login.
|
||||||
|
|
||||||
.. _Viewing a Configuration at Runtime: ../../configuration/ceph-conf#viewing-a-configuration-at-runtime
|
.. _Viewing a Configuration at Runtime: ../../configuration/ceph-conf#viewing-a-configuration-at-runtime
|
||||||
.. _Storage Capacity: ../../configuration/mon-config-ref#storage-capacity
|
.. _Storage Capacity: ../../configuration/mon-config-ref#storage-capacity
|
||||||
|
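The two runtime-configuration methods contrasted above can be sketched as follows; ``osd.0`` and ``osd_max_backfills`` are example values only::

    # method 1: admin socket, run on the host where osd.0 is running
    ceph daemon osd.0 config get osd_max_backfills
    ceph daemon osd.0 config set osd_max_backfills 2

    # method 2: via the monitors, run from any host with admin credentials
    ceph tell osd.0 config set osd_max_backfills 2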
@@ -1011,7 +1011,7 @@ Making a Zonegroup the Default
 One zonegroup in the list of zonegroups must be the default zonegroup. There
 can be only one default zonegroup. In the case that there is only one zonegroup
 which was not designated the default zonegroup when it was created, use the
-folloiwng command to make it the default zonegroup. Commands of this form can
+following command to make it the default zonegroup. Commands of this form can
 be used to change which zonegroup is the default.
 
 #. Designate a zonegroup as the default zonegroup:
@@ -1184,8 +1184,8 @@ The zonegroup configuration looks like this:
 Setting a Zonegroup
 ~~~~~~~~~~~~~~~~~~~~
 
-The process of defining a zonegroup consists of creating a JSON object and, at
-a minimum, specifying the required settings:
+The process of defining a zonegroup consists of creating a JSON object and
+specifying the required settings. Here is a list of the required settings:
 
 1. ``name``: The name of the zonegroup. Required.
 
@@ -1223,26 +1223,26 @@ a minimum, specifying the required settings:
    object data. Set to ``default-placement`` by default. It is also possible
    to set a per-user default placement in the user info for each user.
 
-To set a zonegroup, create a JSON object that contains the required fields,
-save the object to a file (e.g., ``zonegroup.json``), and run the following
-command:
+Setting a Zonegroup - Procedure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. prompt:: bash #
+#. To set a zonegroup, create a JSON object that contains the required fields,
+   save the object to a file (for example, ``zonegroup.json``), and run the
+   following command:
+
+   .. prompt:: bash #
 
       radosgw-admin zonegroup set --infile zonegroup.json
 
   Where ``zonegroup.json`` is the JSON file you created.
 
-.. important:: The ``default`` zonegroup ``is_master`` setting is ``true`` by
-   default. If you create a new zonegroup and want to make it the master
-   zonegroup, you must either set the ``default`` zonegroup ``is_master``
-   setting to ``false``, or delete the ``default`` zonegroup.
+   .. important:: The ``default`` zonegroup ``is_master`` setting is ``true`` by default. If you create an additional zonegroup and want to make it the master zonegroup, you must either set the ``default`` zonegroup ``is_master`` setting to ``false`` or delete the ``default`` zonegroup.
 
-Finally, update the period:
+#. Update the period:
 
   .. prompt:: bash #
 
      radosgw-admin period update --commit
 
 Setting a Zonegroup Map
 ~~~~~~~~~~~~~~~~~~~~~~~~
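As a minimal, hedged sketch of the procedure above (the zonegroup name, api_name, and endpoint are invented for this example, and the exact set of fields accepted may differ by release), the JSON file and the two commands might look like this::

    cat > zonegroup.json <<'EOF'
    {
      "name": "us",
      "api_name": "us",
      "is_master": "true",
      "endpoints": ["http://rgw1:80"]
    }
    EOF

    radosgw-admin zonegroup set --infile zonegroup.json
    radosgw-admin period update --commit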
@@ -7,44 +7,63 @@
 Overview
 --------
 
-| The purpose of the **s3 select** engine is to create an efficient pipe between user client and storage nodes (the engine should be close as possible to storage).
-| It enables selection of a restricted subset of (structured) data stored in an S3 object using an SQL-like syntax.
-| It also enables for higher level analytic-applications (such as SPARK-SQL) , using that feature to improve their latency and throughput.
+The purpose of the **s3 select** engine is to create an efficient pipe between
+user client and storage nodes (the engine should be close as possible to
+storage). It enables the selection of a restricted subset of (structured) data
+stored in an S3 object using an SQL-like syntax. It also enables for higher
+level analytic-applications (such as SPARK-SQL), using that feature to improve
+their latency and throughput.
 
-| For example, a s3-object of several GB (CSV file), a user needs to extract a single column which filtered by another column.
-| As the following query:
-| ``select customer-id from s3Object where age>30 and age<65;``
+For example, an s3-object of several GB (CSV file), a user needs to extract a
+single column filtered by another column. As the following query: ``select
+customer-id from s3Object where age>30 and age<65;``
 
-| Currently the whole s3-object must retrieve from OSD via RGW before filtering and extracting data.
-| By "pushing down" the query into OSD , it's possible to save a lot of network and CPU(serialization / deserialization).
+Currently the whole s3-object must be retrieved from OSD via RGW before
+filtering and extracting data. By "pushing down" the query into radosgw, it's
+possible to save a lot of network and CPU(serialization / deserialization).
 
-| **The bigger the object, and the more accurate the query, the better the performance**.
+**The bigger the object, and the more accurate the query, the better the
+performance**.
 
 Basic workflow
 --------------
 
-| S3-select query is sent to RGW via `AWS-CLI <https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html>`_
+S3-select query is sent to RGW via `AWS-CLI
+<https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html>`_
 
-| It passes the authentication and permission process as an incoming message (POST).
-| **RGWSelectObj_ObjStore_S3::send_response_data** is the “entry point”, it handles each fetched chunk according to input object-key.
-| **send_response_data** is first handling the input query, it extracts the query and other CLI parameters.
+It passes the authentication and permission process as an incoming message
+(POST). **RGWSelectObj_ObjStore_S3::send_response_data** is the “entry point”,
+it handles each fetched chunk according to input object-key.
+**send_response_data** is first handling the input query, it extracts the query
+and other CLI parameters.
 
-| Per each new fetched chunk (~4m), RGW executes s3-select query on it.
-| The current implementation supports CSV objects and since chunks are randomly “cutting” the CSV rows in the middle, those broken-lines (first or last per chunk) are skipped while processing the query.
-| Those “broken” lines are stored and later merged with the next broken-line (belong to the next chunk), and finally processed.
+Per each new fetched chunk (~4m), RGW executes an s3-select query on it. The
+current implementation supports CSV objects and since chunks are randomly
+“cutting” the CSV rows in the middle, those broken-lines (first or last per
+chunk) are skipped while processing the query. Those “broken” lines are
+stored and later merged with the next broken-line (belong to the next chunk),
+and finally processed.
 
-| Per each processed chunk an output message is formatted according to `AWS specification <https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectSELECTContent.html#archive-RESTObjectSELECTContent-responses>`_ and sent back to the client.
-| RGW supports the following response: ``{:event-type,records} {:content-type,application/octet-stream} {:message-type,event}``.
-| For aggregation queries the last chunk should be identified as the end of input, following that the s3-select-engine initiates end-of-process and produces an aggregate result.
+Per each processed chunk an output message is formatted according to `AWS
+specification
+<https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectSELECTContent.html#archive-RESTObjectSELECTContent-responses>`_
+and sent back to the client. RGW supports the following response:
+``{:event-type,records} {:content-type,application/octet-stream}
+{:message-type,event}``. For aggregation queries the last chunk should be
+identified as the end of input, following that the s3-select-engine initiates
+end-of-process and produces an aggregated result.
 
 
 Basic functionalities
 ~~~~~~~~~~~~~~~~~~~~~
 
-| **S3select** has a definite set of functionalities that should be implemented (if we wish to stay compliant with AWS), currently only a portion of it is implemented.
+**S3select** has a definite set of functionalities compliant with AWS.
 
-| The implemented software architecture supports basic arithmetic expressions, logical and compare expressions, including nested function calls and casting operators, that alone enables the user reasonable flexibility.
-| review the below s3-select-feature-table_.
+The implemented software architecture supports basic arithmetic expressions,
+logical and compare expressions, including nested function calls and casting
+operators, which enables the user great flexibility.
+
+review the below s3-select-feature-table_.
 
 
 Error Handling
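The workflow above is driven from the client side; a hedged example of issuing the sample query with the AWS CLI (the bucket, object key, endpoint URL, and output file are placeholders) could look like this::

    aws --endpoint-url http://localhost:8000 s3api select-object-content \
        --bucket mybucket \
        --key people.csv \
        --expression-type SQL \
        --expression "select customer-id from s3Object where age>30 and age<65;" \
        --input-serialization '{"CSV": {}, "CompressionType": "NONE"}' \
        --output-serialization '{"CSV": {}}' \
        output.csv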
@@ -2,11 +2,8 @@
 Documenting Ceph
 ==================
 
-The **easiest way** to help the Ceph project is to contribute to the
-documentation. As the Ceph user base grows and the development pace quickens, an
-increasing number of people are updating the documentation and adding new
-information. Even small contributions like fixing spelling errors or clarifying
-instructions will help the Ceph project immensely.
+You can help the Ceph project by contributing to the documentation. Even
+small contributions help the Ceph project.
 
 The Ceph documentation source resides in the ``ceph/doc`` directory of the Ceph
 repository, and Python Sphinx renders the source into HTML and manpages. The
@@ -47,10 +44,10 @@ branch name you prefer (for example, ``pacific``, to create a URL that reads
 Making Contributions
 ====================
 
-Making a documentation contribution generally involves the same procedural
-sequence as making a code contribution, except that you must build documentation
-source instead of compiling program source. The sequence includes the following
-steps:
+Making a documentation contribution involves the same basic procedure as making
+a code contribution, with one exception: you must build documentation source
+instead of compiling program source. This sequence (the sequence of building
+the documentation source) includes the following steps:
 
 #. `Get the Source`_
 #. `Select a Branch`_
@@ -64,32 +61,33 @@ steps:
 Get the Source
 --------------
 
-Ceph documentation lives in the Ceph repository right alongside the Ceph source
-code under the ``ceph/doc`` directory. For details on github and Ceph,
-see :ref:`Get Involved`.
+The source of the Ceph documentation is a collection of ReStructured Text files
+that are in the Ceph repository in the ``ceph/doc`` directory. For details
+on GitHub and Ceph, see :ref:`Get Involved`.
 
-The most common way to make contributions is to use the `Fork and Pull`_
-approach. You must:
+Use the `Fork and Pull`_ approach to make documentation contributions. To do
+this, you must:
 
-#. Install git locally. For Debian/Ubuntu, execute:
+#. Install git locally. In Debian or Ubuntu, run the following command:
 
    .. prompt:: bash $
 
      sudo apt-get install git
 
-   For Fedora, execute:
+   In Fedora, run the following command:
 
    .. prompt:: bash $
 
     sudo yum install git
 
-   For CentOS/RHEL, execute:
+   In CentOS/RHEL, run the following command:
 
   .. prompt:: bash $
 
     sudo yum install git
 
-#. Ensure your ``.gitconfig`` file has your name and email address. :
+#. Make sure that your ``.gitconfig`` file has been configured to include your
+   name and email address:
 
    .. code-block:: ini
 
@@ -109,37 +107,36 @@ approach. You must:
 
 #. Fork the Ceph project. See https://github.com/ceph/ceph.
 
-#. Clone your fork of the Ceph project to your local host.
+#. Clone your fork of the Ceph project to your local host. This creates what is
+   known as a "local working copy".
 
-Ceph organizes documentation into an information architecture primarily by its
-main components.
+The Ceph documentation is organized by component:
 
-- **Ceph Storage Cluster:** The Ceph Storage Cluster documentation resides
-  under the ``doc/rados`` directory.
+- **Ceph Storage Cluster:** The Ceph Storage Cluster documentation is
+  in the ``doc/rados`` directory.
 
-- **Ceph Block Device:** The Ceph Block Device documentation resides under
+- **Ceph Block Device:** The Ceph Block Device documentation is in
   the ``doc/rbd`` directory.
 
-- **Ceph Object Storage:** The Ceph Object Storage documentation resides under
+- **Ceph Object Storage:** The Ceph Object Storage documentation is in
   the ``doc/radosgw`` directory.
 
-- **Ceph File System:** The Ceph File System documentation resides under the
+- **Ceph File System:** The Ceph File System documentation is in the
   ``doc/cephfs`` directory.
 
-- **Installation (Quick):** Quick start documentation resides under the
+- **Installation (Quick):** Quick start documentation is in the
   ``doc/start`` directory.
 
-- **Installation (Manual):** Manual installation documentation resides under
-  the ``doc/install`` directory.
+- **Installation (Manual):** Documentaton concerning the manual installation of
+  Ceph is in the ``doc/install`` directory.
 
-- **Manpage:** Manpage source resides under the ``doc/man`` directory.
+- **Manpage:** Manpage source is in the ``doc/man`` directory.
 
-- **Developer:** Developer documentation resides under the ``doc/dev``
+- **Developer:** Developer documentation is in the ``doc/dev``
   directory.
 
-- **Images:** If you include images such as JPEG or PNG files, you should
-  store them under the ``doc/images`` directory.
+- **Images:** Images including JPEG and PNG files are stored in the
+  ``doc/images`` directory.
 
 
 Select a Branch
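A hedged sketch of the Fork and Pull flow referenced above (replace the GitHub user name with your own; the remote names are conventional choices, not mandated by the document)::

    # clone your fork, producing the "local working copy" mentioned above
    git clone git@github.com:<your-github-user>/ceph.git
    cd ceph

    # keep the official repository available as an additional remote
    git remote add upstream https://github.com/ceph/ceph.git
    git fetch upstream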
@@ -0,0 +1,30 @@
+overrides:
+  ceph:
+    conf:
+      mgr:
+        debug mgr: 20
+        debug ms: 1
+        debug finisher: 20
+        debug client: 20
+    log-whitelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - pausewr flag
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping
+      - is full \(reached quota
+      - POOL_FULL
+      - POOL_BACKFILLFULL
+
+overrides:
+  kclient:
+    snapdirname: .customsnapkernel
+  ceph:
+    conf:
+      client:
+        client snapdir: .customsnapfuse
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_snap_schedules.TestSnapSchedulesSnapdir
ceph/qa/suites/fs/workload/ms_mode/$ | 0 (new file)
ceph/qa/suites/fs/workload/ms_mode/crc.yaml | 3 (new file)
@@ -0,0 +1,3 @@
+overrides:
+  kclient:
+    mntopts: ["ms_mode=crc"]
ceph/qa/suites/fs/workload/ms_mode/legacy.yaml | 3 (new file)
@@ -0,0 +1,3 @@
+overrides:
+  kclient:
+    mntopts: ["ms_mode=legacy"]
ceph/qa/suites/fs/workload/ms_mode/secure.yaml | 3 (new file)
@@ -0,0 +1,3 @@
+overrides:
+  kclient:
+    mntopts: ["ms_mode=secure"]
@@ -0,0 +1,20 @@
+roles:
+- - host.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mon.a
+  - mgr.a
+  - client.0
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+      - ceph osd pool create foo
+      - rbd pool init foo
+      - ceph orch apply iscsi foo u p
+- workunit:
+    clients:
+      client.0:
+        - cephadm/test_iscsi_pids_limit.sh
@@ -1,5 +1,3 @@
-# Amazon/S3.pm (cpan) not available as an rpm
-os_type: ubuntu
 tasks:
 - install:
 - ceph:
@@ -1,5 +1,3 @@
-# Amazon::S3 is not available on el7
-os_type: ubuntu
 tasks:
 - install:
 - ceph:
@@ -1,5 +1,3 @@
-# Amazon/S3.pm (cpan) not available as an rpm
-os_type: ubuntu
 tasks:
 - install:
 - ceph:
ceph/qa/suites/rgw/multifs/ubuntu_latest.yaml | 1 (new symbolic link)
@@ -0,0 +1 @@
+.qa/distros/supported/ubuntu_latest.yaml
ceph/qa/suites/rgw/multisite/supported-random-distro$ | 1 (new symbolic link)
@@ -0,0 +1 @@
+.qa/distros/supported-random-distro$
@@ -1,8 +1,3 @@
-# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
-#os_type: centos
-# ubuntu and no valgrind until we migrate test to py3
-os_type: ubuntu
-
 tasks:
 - install:
 - ceph: {cluster: c1}
@@ -10,16 +5,12 @@ tasks:
 - rgw:
     c1.client.0:
       port: 8000
-      # valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
     c1.client.1:
       port: 8001
-      # valgrind: [--tool=memcheck, --max-threads=1024]
     c2.client.0:
       port: 8000
-      # valgrind: [--tool=memcheck, --max-threads=1024]
     c2.client.1:
       port: 8001
-      # valgrind: [--tool=memcheck, --max-threads=1024]
 - rgw-multisite:
 - rgw-multisite-tests:
     config:
@@ -1,20 +0,0 @@
-# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
-#os_type: centos
-# ubuntu and no valgrind until we migrate test to py3
-os_type: ubuntu
-
-overrides:
-  install:
-    ceph:
-  ceph:
-    conf:
-      global:
-        osd heartbeat grace: 40
-      mon:
-        mon osd crush smoke test: false
-      osd:
-        osd fast shutdown: false
-#  valgrind:
-#    mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
-#    osd: [--tool=memcheck]
-#    mds: [--tool=memcheck]
ceph/qa/suites/rgw/multisite/valgrind.yaml.disabled | 20 (new file)
@@ -0,0 +1,20 @@
+overrides:
+  install:
+    ceph:
+  ceph:
+    conf:
+      global:
+        osd heartbeat grace: 40
+      mon:
+        mon osd crush smoke test: false
+      osd:
+        osd fast shutdown: false
+  rgw:
+    c1.client.0:
+      valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
+    c1.client.1:
+      valgrind: [--tool=memcheck, --max-threads=1024]
+    c2.client.0:
+      valgrind: [--tool=memcheck, --max-threads=1024]
+    c2.client.1:
+      valgrind: [--tool=memcheck, --max-threads=1024]
@@ -1,7 +1,3 @@
-# ubuntu for py2 until we move to py3
-os_type: ubuntu
-
-# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
 tasks:
 - install:
 - ceph:
ceph/qa/suites/rgw/thrash/ubuntu_latest.yaml | 1 (new symbolic link)
@@ -0,0 +1 @@
+.qa/distros/supported/ubuntu_latest.yaml
@@ -1,5 +1,3 @@
-# Amazon/S3.pm (cpan) not available as an rpm
-os_type: ubuntu
 tasks:
 - workunit:
     clients:
@@ -1,5 +1,3 @@
-# Amazon::S3 is not available on el7
-os_type: ubuntu
 tasks:
 - workunit:
     clients:
@@ -1,5 +1,3 @@
-# Amazon/S3.pm (cpan) not available as an rpm
-os_type: ubuntu
 tasks:
 - workunit:
     clients:
ceph/qa/suites/upgrade-clients/client-upgrade-pacific-reef/.qa | 1 (new symbolic link)
@@ -0,0 +1 @@
+../../.qa
@@ -0,0 +1 @@
+../../.qa
@@ -0,0 +1 @@
+../.qa/
@@ -0,0 +1,4 @@
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 30 # GB
@@ -0,0 +1,19 @@
+meta:
+- desc: |
+   Install and run ceph on one node,
+   with a separate client 1.
+   Upgrade client 1 to reef
+   Run tests against old cluster
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+  - mgr.x
+- - client.1
+overrides:
+  ceph:
+    fs: xfs
@@ -0,0 +1 @@
+../.qa/
@@ -0,0 +1,11 @@
+tasks:
+- install:
+    branch: pacific
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados']
+- print: "**** done install pacific"
+- install.upgrade:
+    exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1', 'python-ceph']
+    client.1:
+- print: "**** done install.upgrade to -x on client.1"
+- ceph:
+- print: "**** done ceph task"
@@ -0,0 +1 @@
+../.qa/
@@ -0,0 +1,34 @@
+tasks:
+- parallel:
+  - workunit:
+      branch: pacific
+      clients:
+        client.0:
+          - rbd/notify_master.sh
+      env:
+        RBD_FEATURES: "61"
+  - workunit:
+      branch: reef
+      clients:
+        client.1:
+          - rbd/notify_slave.sh
+      env:
+        RBD_FEATURES: "61"
+        RBD_DISABLE_UPDATE_FEATURES: "1"
+- print: "**** done rbd: old librbd -> new librbd"
+- parallel:
+  - workunit:
+      branch: pacific
+      clients:
+        client.0:
+          - rbd/notify_slave.sh
+      env:
+        RBD_FEATURES: "61"
+  - workunit:
+      branch: reef
+      clients:
+        client.1:
+          - rbd/notify_master.sh
+      env:
+        RBD_FEATURES: "61"
+- print: "**** done rbd: new librbd -> old librbd"
@ -0,0 +1 @@
+../.qa/

@ -0,0 +1 @@
+../../../../../../distros/all/ubuntu_20.04.yaml
@ -1474,9 +1474,8 @@ def healthy(ctx, config):

     if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
         # Some MDSs exist, wait for them to be healthy
-        ceph_fs = Filesystem(ctx) # TODO: make Filesystem cluster-aware
-        ceph_fs.wait_for_daemons(timeout=300)
+        for fs in Filesystem.get_all_fs(ctx):
+            fs.wait_for_daemons(timeout=300)


 def wait_for_mon_quorum(ctx, config):
     """
@ -156,7 +156,7 @@ def task(ctx, config):
         mount_x.cephfs_mntpt = config.get("mount_path")
         if config.get("mountpoint"):
             mount_x.hostfs_mntpt = config.get("mountpoint")
-        mount_x.mount(createfs=False)
+        mount_x.mount()

     for info in mounted_by_me.values():
         info["mount"].wait_until_mounted()
@ -486,6 +486,17 @@ class MDSCluster(CephCluster):


 class Filesystem(MDSCluster):
+
+    """
+    Generator for all Filesystems in the cluster.
+    """
+    @classmethod
+    def get_all_fs(cls, ctx):
+        mdsc = MDSCluster(ctx)
+        status = mdsc.status()
+        for fs in status.get_filesystems():
+            yield cls(ctx, fscid=fs['id'])
+
     """
     This object is for driving a CephFS filesystem. The MDS daemons driven by
     MDSCluster may be shared with other Filesystems.
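The new get_all_fs() classmethod lets callers such as the healthy() task above enumerate every file system in the cluster and act on each one. As a rough standalone illustration of the same enumerate-then-act pattern (not the teuthology helper itself; it assumes only a reachable `ceph` CLI), one could write:

#!/usr/bin/env python3
# Hypothetical standalone analogue of "list all file systems, then act on each".
import json
import subprocess


def get_all_fs_names():
    # `ceph fs ls --format json` returns one JSON object per file system.
    out = subprocess.check_output(['ceph', 'fs', 'ls', '--format', 'json'])
    for fs in json.loads(out):
        yield fs['name']


if __name__ == '__main__':
    for name in get_all_fs_names():
        print('would wait for the daemons of file system:', name)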
@ -31,18 +31,12 @@ class FuseMount(CephFSMount):
|
|||||||
self.inst = None
|
self.inst = None
|
||||||
self.addr = None
|
self.addr = None
|
||||||
|
|
||||||
def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
|
def mount(self, mntopts=[], check_status=True, **kwargs):
|
||||||
self.update_attrs(**kwargs)
|
self.update_attrs(**kwargs)
|
||||||
self.assert_and_log_minimum_mount_details()
|
self.assert_and_log_minimum_mount_details()
|
||||||
|
|
||||||
self.setup_netns()
|
self.setup_netns()
|
||||||
|
|
||||||
if createfs:
|
|
||||||
# TODO: don't call setupfs() from within mount(), since it's
|
|
||||||
# absurd. The proper order should be: create FS first and then
|
|
||||||
# call mount().
|
|
||||||
self.setupfs(name=self.cephfs_name)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return self._mount(mntopts, check_status)
|
return self._mount(mntopts, check_status)
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
|
@ -34,17 +34,12 @@ class KernelMount(CephFSMount):
         self.inst = None
         self.addr = None

-    def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
+    def mount(self, mntopts=[], check_status=True, **kwargs):
         self.update_attrs(**kwargs)
         self.assert_and_log_minimum_mount_details()

         self.setup_netns()

-        # TODO: don't call setupfs() from within mount(), since it's
-        # absurd. The proper order should be: create FS first and then
-        # call mount().
-        if createfs:
-            self.setupfs(name=self.cephfs_name)
         if not self.cephfs_mntpt:
             self.cephfs_mntpt = '/'

@ -123,6 +123,18 @@ class CephFSMount(object):
     def netns_name(self, name):
         self._netns_name = name

+    def assert_that_ceph_fs_exists(self):
+        output = self.client_remote.run(args='ceph fs ls', stdout=StringIO()).\
+            stdout.getvalue()
+        if self.cephfs_name:
+            assert self.cephfs_name in output, \
+                'expected ceph fs is not present on the cluster'
+            log.info(f'Mounting Ceph FS {self.cephfs_name}; just confirmed its presence on cluster')
+        else:
+            assert 'No filesystems enabled' not in output, \
+                'ceph cluster has no ceph fs, not even the default ceph fs'
+            log.info('Mounting default Ceph FS; just confirmed its presence on cluster')
+
     def assert_and_log_minimum_mount_details(self):
         """
         Make sure we have minimum details required for mounting. Ideally, this
@ -136,6 +148,8 @@ class CephFSMount(object):
                       '3. the remote machine where CephFS will be mounted.\n')
            raise RuntimeError(errmsg)

+        self.assert_that_ceph_fs_exists()
+
         log.info('Mounting Ceph FS. Following are details of mount; remember '
                  '"None" represents Python type None -')
         log.info(f'self.client_remote.hostname = {self.client_remote.hostname}')
@ -395,7 +409,7 @@ class CephFSMount(object):
         args = ['sudo', 'ip', 'link', 'set', 'brx.{0}'.format(self.nsid), 'up']
         self.client_remote.run(args=args, timeout=(5*60), omit_sudo=False)

-    def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
+    def mount(self, mntopts=[], check_status=True, **kwargs):
         """
         kwargs expects its members to be same as the arguments accepted by
         self.update_attrs().
|
|||||||
2. Run update_attrs().
|
2. Run update_attrs().
|
||||||
3. Run mount().
|
3. Run mount().
|
||||||
|
|
||||||
Accepts arguments of self.mount() and self.update_attrs() with 2 exceptions -
|
Accepts arguments of self.mount() and self.update_attrs() with 1
|
||||||
1. Accepts wait too which can be True or False.
|
exception: wait accepted too which can be True or False.
|
||||||
2. The default value of createfs is False.
|
|
||||||
"""
|
"""
|
||||||
self.umount_wait()
|
self.umount_wait()
|
||||||
assert not self.mounted
|
assert not self.mounted
|
||||||
|
|
||||||
mntopts = kwargs.pop('mntopts', [])
|
mntopts = kwargs.pop('mntopts', [])
|
||||||
createfs = kwargs.pop('createfs', False)
|
|
||||||
check_status = kwargs.pop('check_status', True)
|
check_status = kwargs.pop('check_status', True)
|
||||||
wait = kwargs.pop('wait', True)
|
wait = kwargs.pop('wait', True)
|
||||||
|
|
||||||
self.update_attrs(**kwargs)
|
self.update_attrs(**kwargs)
|
||||||
|
|
||||||
retval = self.mount(mntopts=mntopts, createfs=createfs,
|
retval = self.mount(mntopts=mntopts, check_status=check_status)
|
||||||
check_status=check_status)
|
|
||||||
# avoid this scenario (again): mount command might've failed and
|
# avoid this scenario (again): mount command might've failed and
|
||||||
# check_status might have silenced the exception, yet we attempt to
|
# check_status might have silenced the exception, yet we attempt to
|
||||||
# wait which might lead to an error.
|
# wait which might lead to an error.
|
||||||
|
@ -667,7 +667,7 @@ class LocalKernelMount(KernelMount):
             path = "{0}/client.{1}.*.asok".format(d, self.client_id)
             return path

-    def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
+    def mount(self, mntopts=[], check_status=True, **kwargs):
         self.update_attrs(**kwargs)
         self.assert_and_log_minimum_mount_details()

@ -679,9 +679,6 @@ class LocalKernelMount(KernelMount):

         if not self.cephfs_mntpt:
             self.cephfs_mntpt = "/"
-        # TODO: don't call setupfs() from within mount()
-        if createfs:
-            self.setupfs(name=self.cephfs_name)

         opts = 'norequire_active_mds'
         if self.client_id:
@ -801,7 +798,7 @@ class LocalFuseMount(FuseMount):
             path = "{0}/client.{1}.*.asok".format(d, self.client_id)
             return path

-    def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
+    def mount(self, mntopts=[], check_status=True, **kwargs):
         self.update_attrs(**kwargs)
         self.assert_and_log_minimum_mount_details()

@ -811,10 +808,6 @@ class LocalFuseMount(FuseMount):
         else:
             self.using_namespace = False

-        # TODO: don't call setupfs() from within mount()
-        if createfs:
-            self.setupfs(name=self.cephfs_name)
-
         stderr = StringIO()
         try:
             self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt],
ceph/qa/workunits/cephadm/test_iscsi_pids_limit.sh (new executable file, 24 lines)
@ -0,0 +1,24 @@
+#!/bin/bash
+
+# checks if the containers default pids-limit (4096) is removed and Iscsi
+# containers continue to run
+# exits 1 if fails
+
+set -ex
+
+ISCSI_CONT_IDS=$(sudo podman ps -qa --filter='name=iscsi')
+CONT_COUNT=$(echo ${ISCSI_CONT_IDS} | wc -w)
+test ${CONT_COUNT} -eq 2
+
+for i in ${ISCSI_CONT_IDS}
+do
+  sudo podman exec ${i} /bin/sh -c 'for j in {0..20000}; do sleep 30 & done'
+done
+
+for i in ${ISCSI_CONT_IDS}
+do
+  SLEEP_COUNT=$(sudo podman exec ${i} /bin/sh -c 'ps -ef | grep -c sleep')
+  test ${SLEEP_COUNT} -gt 20000
+done
+
+echo OK
@ -1,2 +1,2 @@
-5a2d516ce4b134bfafc80c4274532ac0d56fc1e2
-16.2.12
+5378749ba6be3a0868b51803968ee9cde4833a3e
+16.2.13
@ -806,6 +806,10 @@ OPTION(rocksdb_collect_compaction_stats, OPT_BOOL) //For rocksdb, this behavior
 OPTION(rocksdb_collect_extended_stats, OPT_BOOL) //For rocksdb, this behavior will be an overhead of 5%~10%, collected only rocksdb_perf is enabled.
 OPTION(rocksdb_collect_memory_stats, OPT_BOOL) //For rocksdb, this behavior will be an overhead of 5%~10%, collected only rocksdb_perf is enabled.

+OPTION(rocksdb_cf_compact_on_deletion, OPT_BOOL)
+OPTION(rocksdb_cf_compact_on_deletion_sliding_window, OPT_INT)
+OPTION(rocksdb_cf_compact_on_deletion_trigger, OPT_INT)
+
 // rocksdb options that will be used for omap(if omap_backend is rocksdb)
 OPTION(filestore_rocksdb_options, OPT_STR)
 // rocksdb options that will be used in monstore
|
|||||||
*/
|
*/
|
||||||
OPTION(rgw_bucket_index_max_aio, OPT_U32)
|
OPTION(rgw_bucket_index_max_aio, OPT_U32)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents the maximum AIO pending requests for multi object delete requests.
|
||||||
|
*/
|
||||||
|
OPTION(rgw_multi_obj_del_max_aio, OPT_U32)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* whether or not the quota/gc threads should be started
|
* whether or not the quota/gc threads should be started
|
||||||
*/
|
*/
|
||||||
|
@ -3972,6 +3972,22 @@ std::vector<Option> get_global_options() {
     .set_default(4_K)
     .set_description("The block size for index partitions. (0 = rocksdb default)"),

+    Option("rocksdb_cf_compact_on_deletion", Option::TYPE_BOOL, Option::LEVEL_DEV)
+    .set_default(false)
+    .set_description("Compact the column family when a certain number of tombstones are observed within a given window.")
+    .set_long_description("This setting instructs RocksDB to compact a column family when a certain number of tombstones are observed during iteration within a certain sliding window. For instance if rocksdb_cf_compact_on_deletion_sliding_window is 8192 and rocksdb_cf_compact_on_deletion_trigger is 4096, then once 4096 tombstones are observed after iteration over 8192 entries, the column family will be compacted.")
+    .add_see_also({"rocksdb_cf_compact_on_deletion_sliding_window", "rocksdb_cf_compact_on_deletion_trigger"}),
+
+    Option("rocksdb_cf_compact_on_deletion_sliding_window", Option::TYPE_INT, Option::LEVEL_DEV)
+    .set_default(32768)
+    .set_description("The sliding window to use when rocksdb_cf_compact_on_deletion is enabled.")
+    .add_see_also({"rocksdb_cf_compact_on_deletion"}),
+
+    Option("rocksdb_cf_compact_on_deletion_trigger", Option::TYPE_INT, Option::LEVEL_DEV)
+    .set_default(16384)
+    .set_description("The trigger to use when rocksdb_cf_compact_on_deletion is enabled.")
+    .add_see_also({"rocksdb_cf_compact_on_deletion"}),
+
     Option("mon_rocksdb_options", Option::TYPE_STR, Option::LEVEL_ADVANCED)
     .set_default("write_buffer_size=33554432,"
                  "compression=kNoCompression,"
|
|||||||
.set_default(128)
|
.set_default(128)
|
||||||
.set_description("Max number of concurrent RADOS requests when handling bucket shards."),
|
.set_description("Max number of concurrent RADOS requests when handling bucket shards."),
|
||||||
|
|
||||||
|
Option("rgw_multi_obj_del_max_aio", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
|
||||||
|
.set_default(16)
|
||||||
|
.set_description("Max number of concurrent RADOS requests per multi-object delete request."),
|
||||||
|
|
||||||
Option("rgw_enable_quota_threads", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
|
Option("rgw_enable_quota_threads", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
|
||||||
.set_default(true)
|
.set_default(true)
|
||||||
.set_description("Enables the quota maintenance thread.")
|
.set_description("Enables the quota maintenance thread.")
|
||||||
|
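rgw_multi_obj_del_max_aio caps how many per-object delete requests a single multi-object delete keeps in flight at once. The pattern is plain bounded fan-out; a hedged Python sketch of the same idea, where the object names and the sleep standing in for the real RADOS call are illustrative only:

import asyncio


async def delete_object(key):
    await asyncio.sleep(0.01)        # stand-in for the real per-object delete
    print('deleted', key)


async def multi_delete(keys, max_aio=16):
    sem = asyncio.Semaphore(max_aio)  # never more than max_aio deletes in flight

    async def bounded(key):
        async with sem:
            await delete_object(key)

    await asyncio.gather(*(bounded(k) for k in keys))


asyncio.run(multi_delete([f'obj-{i}' for i in range(100)], max_aio=16))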
@ -1468,8 +1468,8 @@ CEPH_RBD_API int rbd_quiesce_watch(rbd_image_t image,
  * @param handle which watch is complete
  * @param r the return code
  */
-CEPH_RADOS_API void rbd_quiesce_complete(rbd_image_t image, uint64_t handle,
+CEPH_RBD_API void rbd_quiesce_complete(rbd_image_t image, uint64_t handle,
                                          int r);

 /**
  * Unregister a quiesce/unquiesce watcher.
@ -24,6 +24,7 @@ namespace fs = std::experimental::filesystem;
 #include "rocksdb/cache.h"
 #include "rocksdb/filter_policy.h"
 #include "rocksdb/utilities/convenience.h"
+#include "rocksdb/utilities/table_properties_collectors.h"
 #include "rocksdb/merge_operator.h"

 #include "common/perf_counters.h"
|||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set Compact on Deletion Factory
|
||||||
|
if (cct->_conf->rocksdb_cf_compact_on_deletion) {
|
||||||
|
size_t sliding_window = cct->_conf->rocksdb_cf_compact_on_deletion_sliding_window;
|
||||||
|
size_t trigger = cct->_conf->rocksdb_cf_compact_on_deletion_trigger;
|
||||||
|
cf_opt->table_properties_collector_factories.emplace_back(
|
||||||
|
rocksdb::NewCompactOnDeletionCollectorFactory(sliding_window, trigger));
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -299,6 +299,15 @@ int list_mirror_images(librados::IoCtx& io_ctx,
   return 0;
 }

+template <typename I>
+const char *pool_or_namespace(I *ictx) {
+  if (!ictx->md_ctx.get_namespace().empty()) {
+    return "namespace";
+  } else {
+    return "pool";
+  }
+}
+
 struct C_ImageGetInfo : public Context {
   mirror_image_info_t *mirror_image_info;
   mirror_image_mode_t *mirror_image_mode;
@ -434,9 +443,15 @@ int Mirror<I>::image_enable(I *ictx, mirror_image_mode_t mode,
     return r;
   }

+  if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
+    lderr(cct) << "cannot enable mirroring: mirroring is not enabled on a "
+               << pool_or_namespace(ictx) << dendl;
+    return -EINVAL;
+  }
+
   if (mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
-    lderr(cct) << "cannot enable mirroring in the current pool mirroring mode"
-               << dendl;
+    lderr(cct) << "cannot enable mirroring: " << pool_or_namespace(ictx)
+               << " is not in image mirror mode" << dendl;
     return -EINVAL;
   }

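The two guards added above mean per-image mirroring can only be enabled when the surrounding pool or namespace has mirroring configured and is in image mode. A toy Python rendering of that decision, with mode names as plain strings for illustration (the real check uses the cls::rbd mirror modes inside librbd):

def can_enable_image_mirroring(pool_or_ns_mode):
    # Mirrors the order of checks in image_enable(): disabled first, then mode.
    if pool_or_ns_mode == 'disabled':
        return 'error: mirroring is not enabled on the pool/namespace'
    if pool_or_ns_mode != 'image':
        return 'error: pool/namespace is not in image mirror mode'
    return 'ok: image mirroring can be enabled'


for mode in ('disabled', 'pool', 'image'):
    print(mode, '->', can_enable_image_mirroring(mode))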
@ -514,13 +514,13 @@ public:
       }
       f->close_section();
     }
+    f->close_section(); // always_on_modules
     f->dump_int("last_failure_osd_epoch", last_failure_osd_epoch);
     f->open_array_section("active_clients");
     for (const auto &c : clients) {
       f->dump_object("client", c);
     }
-    f->close_section();
-    f->close_section();
+    f->close_section(); // active_clients
   }

   static void generate_test_instances(std::list<MgrMap*> &l) {
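With the extra close_section() above, `last_failure_osd_epoch` and `active_clients` are emitted at the top level of the MgrMap dump rather than inside the `always_on_modules` section. A quick way to check that against a running cluster (assumes the `ceph` CLI on PATH):

import json
import subprocess

dump = json.loads(subprocess.check_output(['ceph', 'mgr', 'dump', '--format', 'json']))
# Both fields now live at the top level of the dump.
print('last_failure_osd_epoch:', dump['last_failure_osd_epoch'])
print('active clients:', len(dump['active_clients']))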
@ -5,6 +5,8 @@ set -x
 export PATH=/root/bin:$PATH
 mkdir /root/bin

+export CEPHADM_IMAGE='quay.ceph.io/ceph-ci/ceph:pacific'
+
 cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
 chmod +x /root/bin/cephadm
 mkdir -p /etc/ceph
@ -328,8 +328,9 @@ class CephFS(RESTController):
             raise cherrypy.HTTPError(404,
                                      "Client {0} does not exist in cephfs {1}".format(client_id,
                                                                                       fs_id))
+        filters = [f'id={client_id}']
         CephService.send_command('mds', 'client evict',
-                                 srv_spec='{0}:0'.format(fs_id), id=client_id)
+                                 srv_spec='{0}:0'.format(fs_id), filters=filters)

     @staticmethod
     def _cephfs_instance(fs_id):
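The controller now selects the client to evict with an `id=<client_id>` filter rather than a bare `id` argument. The equivalent manual command, shelled out from Python for illustration (assumes the `ceph` CLI, that the target file system's rank 0 daemon is `mds.0`, and a made-up client id):

import subprocess

client_id = 4123  # hypothetical id, normally taken from `ceph tell mds.0 client ls`
subprocess.run(
    ['ceph', 'tell', 'mds.0', 'client', 'evict', f'id={client_id}'],
    check=True)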
@ -9,7 +9,8 @@ afterEach(() => {
 Cypress.on('uncaught:exception', (err: Error) => {
   if (
     err.message.includes('ResizeObserver loop limit exceeded') ||
-    err.message.includes('api/prometheus/rules')
+    err.message.includes('api/prometheus/rules') ||
+    err.message.includes('NG0100: ExpressionChangedAfterItHasBeenCheckedError')
   ) {
     return false;
   }
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.57d0494f276bf42af928.js (vendored normal file, 1 line)
File diff suppressed because one or more lines are too long
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/483.e54c767c9033c13a1c71.js (vendored normal file, 1 line)
File diff suppressed because one or more lines are too long
@ -18,6 +18,6 @@
 </noscript>

 <cd-root></cd-root>
-<script src="runtime.89a9f685232e870f1afe.js" defer></script><script src="polyfills.2068f3f22a496426465b.js" defer></script><script src="scripts.6bda3fa7e09a87cd4228.js" defer></script><script src="main.f539fe276e7cf959ad9d.js" defer></script>
+<script src="runtime.89a9f685232e870f1afe.js" defer></script><script src="polyfills.2068f3f22a496426465b.js" defer></script><script src="scripts.6bda3fa7e09a87cd4228.js" defer></script><script src="main.863ed935b3f00f328481.js" defer></script>

 </body></html>
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.863ed935b3f00f328481.js (vendored normal file, 3 lines)
File diff suppressed because one or more lines are too long
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.89a9f685232e870f1afe.js (vendored normal file, 1 line)
@ -0,0 +1 @@
(()=>{"use strict";var e,h={},v={};function r(e){var n=v[e];if(void 0!==n)return n.exports;var t=v[e]={id:e,loaded:!1,exports:{}};return h[e].call(t.exports,t,t.exports,r),t.loaded=!0,t.exports}r.m=h,e=[],r.O=(n,t,f,i)=>{if(!t){var a=1/0;for(o=0;o<e.length;o++){for(var[t,f,i]=e[o],d=!0,s=0;s<t.length;s++)(!1&i||a>=i)&&Object.keys(r.O).every(b=>r.O[b](t[s]))?t.splice(s--,1):(d=!1,i<a&&(a=i));if(d){e.splice(o--,1);var l=f();void 0!==l&&(n=l)}}return n}i=i||0;for(var o=e.length;o>0&&e[o-1][2]>i;o--)e[o]=e[o-1];e[o]=[t,f,i]},r.n=e=>{var n=e&&e.__esModule?()=>e.default:()=>e;return r.d(n,{a:n}),n},(()=>{var n,e=Object.getPrototypeOf?t=>Object.getPrototypeOf(t):t=>t.__proto__;r.t=function(t,f){if(1&f&&(t=this(t)),8&f||"object"==typeof t&&t&&(4&f&&t.__esModule||16&f&&"function"==typeof t.then))return t;var i=Object.create(null);r.r(i);var o={};n=n||[null,e({}),e([]),e(e)];for(var a=2&f&&t;"object"==typeof a&&!~n.indexOf(a);a=e(a))Object.getOwnPropertyNames(a).forEach(d=>o[d]=()=>t[d]);return o.default=()=>t,r.d(i,o),i}})(),r.d=(e,n)=>{for(var t in n)r.o(n,t)&&!r.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:n[t]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce((n,t)=>(r.f[t](e,n),n),[])),r.u=e=>e+"."+{117:"9781bbf8cc6a4aaa7e8e",281:"57d0494f276bf42af928",483:"e54c767c9033c13a1c71"}[e]+".js",r.miniCssF=e=>"styles.c0c3da54c9c7b1207ad8.css",r.o=(e,n)=>Object.prototype.hasOwnProperty.call(e,n),(()=>{var e={},n="ceph-dashboard:";r.l=(t,f,i,o)=>{if(e[t])e[t].push(f);else{var a,d;if(void 0!==i)for(var s=document.getElementsByTagName("script"),l=0;l<s.length;l++){var c=s[l];if(c.getAttribute("src")==t||c.getAttribute("data-webpack")==n+i){a=c;break}}a||(d=!0,(a=document.createElement("script")).charset="utf-8",a.timeout=120,r.nc&&a.setAttribute("nonce",r.nc),a.setAttribute("data-webpack",n+i),a.src=r.tu(t)),e[t]=[f];var u=(_,b)=>{a.onerror=a.onload=null,clearTimeout(p);var g=e[t];if(delete e[t],a.parentNode&&a.parentNode.removeChild(a),g&&g.forEach(y=>y(b)),_)return _(b)},p=setTimeout(u.bind(null,void 0,{type:"timeout",target:a}),12e4);a.onerror=u.bind(null,a.onerror),a.onload=u.bind(null,a.onload),d&&document.head.appendChild(a)}}})(),r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),(()=>{var e;r.tu=n=>(void 0===e&&(e={createScriptURL:t=>t},"undefined"!=typeof trustedTypes&&trustedTypes.createPolicy&&(e=trustedTypes.createPolicy("angular#bundler",e))),e.createScriptURL(n))})(),r.p="",(()=>{var e={666:0};r.f.j=(f,i)=>{var o=r.o(e,f)?e[f]:void 0;if(0!==o)if(o)i.push(o[2]);else if(666!=f){var a=new Promise((c,u)=>o=e[f]=[c,u]);i.push(o[2]=a);var d=r.p+r.u(f),s=new Error;r.l(d,c=>{if(r.o(e,f)&&(0!==(o=e[f])&&(e[f]=void 0),o)){var u=c&&("load"===c.type?"missing":c.type),p=c&&c.target&&c.target.src;s.message="Loading chunk "+f+" failed.\n("+u+": "+p+")",s.name="ChunkLoadError",s.type=u,s.request=p,o[1](s)}},"chunk-"+f,f)}else e[f]=0},r.O.j=f=>0===e[f];var n=(f,i)=>{var s,l,[o,a,d]=i,c=0;for(s in a)r.o(a,s)&&(r.m[s]=a[s]);if(d)var u=d(r);for(f&&f(i);c<o.length;c++)r.o(e,l=o[c])&&e[l]&&e[l][0](),e[o[c]]=0;return r.O(u)},t=self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[];t.forEach(n.bind(null,0)),t.push=n.bind(null,t.push.bind(t))})()})();
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.c0c3da54c9c7b1207ad8.css (vendored normal file, 20 lines)
File diff suppressed because one or more lines are too long
@ -85,6 +85,12 @@
                      aria-label="Close"
                      (backAction)="onPreviousStep()"
                      [name]="showCancelButtonLabel()"></cd-back-button>
+     <button class="btn btn-light m-2 me-4 float-end"
+             id="skipStepBtn"
+             (click)="onSkip()"
+             aria-label="Skip this step"
+             *ngIf="stepTitles[currentStep.stepIndex - 1] === 'Create OSDs'"
+             i18n>Skip</button>
     </div>
   </div>

@ -151,4 +151,28 @@ describe('CreateClusterComponent', () => {
|
|||||||
component.onSubmit();
|
component.onSubmit();
|
||||||
expect(hostServiceSpy).toHaveBeenCalledTimes(1);
|
expect(hostServiceSpy).toHaveBeenCalledTimes(1);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should show skip button in the Create OSDs Steps', () => {
|
||||||
|
component.createCluster();
|
||||||
|
fixture.detectChanges();
|
||||||
|
|
||||||
|
component.onNextStep();
|
||||||
|
fixture.detectChanges();
|
||||||
|
const skipBtn = fixture.debugElement.query(By.css('#skipStepBtn')).nativeElement;
|
||||||
|
expect(skipBtn).not.toBe(null);
|
||||||
|
expect(skipBtn.innerHTML).toBe('Skip');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should skip the Create OSDs Steps', () => {
|
||||||
|
component.createCluster();
|
||||||
|
fixture.detectChanges();
|
||||||
|
|
||||||
|
component.onNextStep();
|
||||||
|
fixture.detectChanges();
|
||||||
|
const skipBtn = fixture.debugElement.query(By.css('#skipStepBtn')).nativeElement;
|
||||||
|
skipBtn.click();
|
||||||
|
fixture.detectChanges();
|
||||||
|
|
||||||
|
expect(component.stepsToSkip['Create OSDs']).toBe(true);
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
@ -52,6 +52,7 @@ export class CreateClusterComponent implements OnInit, OnDestroy {
   deploymentOption: DeploymentOptions;
   selectedOption = {};
   simpleDeployment = true;
+  stepsToSkip: { [steps: string]: boolean } = {};

   @Output()
   submitAction = new EventEmitter();
@ -80,7 +81,11 @@ export class CreateClusterComponent implements OnInit, OnDestroy {
   ngOnInit(): void {
     this.osdService.getDeploymentOptions().subscribe((options) => {
       this.deploymentOption = options;
-      this.selectedOption = { option: options.recommended_option };
+      this.selectedOption = { option: options.recommended_option, encrypted: false };
+    });
+
+    this.stepTitles.forEach((stepTitle) => {
+      this.stepsToSkip[stepTitle] = false;
     });
   }

@ -113,71 +118,75 @@ export class CreateClusterComponent implements OnInit, OnDestroy {
|
|||||||
}
|
}
|
||||||
|
|
||||||
onSubmit() {
|
onSubmit() {
|
||||||
this.hostService.list('false').subscribe((hosts) => {
|
if (!this.stepsToSkip['Add Hosts']) {
|
||||||
hosts.forEach((host) => {
|
this.hostService.list('false').subscribe((hosts) => {
|
||||||
const index = host['labels'].indexOf('_no_schedule', 0);
|
hosts.forEach((host) => {
|
||||||
if (index > -1) {
|
const index = host['labels'].indexOf('_no_schedule', 0);
|
||||||
host['labels'].splice(index, 1);
|
if (index > -1) {
|
||||||
this.observables.push(this.hostService.update(host['hostname'], true, host['labels']));
|
host['labels'].splice(index, 1);
|
||||||
}
|
this.observables.push(this.hostService.update(host['hostname'], true, host['labels']));
|
||||||
});
|
|
||||||
forkJoin(this.observables)
|
|
||||||
.pipe(
|
|
||||||
finalize(() =>
|
|
||||||
this.clusterService.updateStatus('POST_INSTALLED').subscribe(() => {
|
|
||||||
this.notificationService.show(
|
|
||||||
NotificationType.success,
|
|
||||||
$localize`Cluster expansion was successful`
|
|
||||||
);
|
|
||||||
this.router.navigate(['/dashboard']);
|
|
||||||
})
|
|
||||||
)
|
|
||||||
)
|
|
||||||
.subscribe({
|
|
||||||
error: (error) => error.preventDefault()
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
if (this.driveGroup) {
|
|
||||||
const user = this.authStorageService.getUsername();
|
|
||||||
this.driveGroup.setName(`dashboard-${user}-${_.now()}`);
|
|
||||||
this.driveGroups.push(this.driveGroup.spec);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (this.simpleDeployment) {
|
|
||||||
const title = this.deploymentOption?.options[this.selectedOption['option']].title;
|
|
||||||
const trackingId = $localize`${title} deployment`;
|
|
||||||
this.taskWrapper
|
|
||||||
.wrapTaskAroundCall({
|
|
||||||
task: new FinishedTask('osd/' + URLVerbs.CREATE, {
|
|
||||||
tracking_id: trackingId
|
|
||||||
}),
|
|
||||||
call: this.osdService.create([this.selectedOption], trackingId, 'predefined')
|
|
||||||
})
|
|
||||||
.subscribe({
|
|
||||||
error: (error) => error.preventDefault(),
|
|
||||||
complete: () => {
|
|
||||||
this.submitAction.emit();
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
} else {
|
forkJoin(this.observables)
|
||||||
if (this.osdService.osdDevices['totalDevices'] > 0) {
|
.pipe(
|
||||||
this.driveGroup.setFeature('encrypted', this.selectedOption['encrypted']);
|
finalize(() =>
|
||||||
const trackingId = _.join(_.map(this.driveGroups, 'service_id'), ', ');
|
this.clusterService.updateStatus('POST_INSTALLED').subscribe(() => {
|
||||||
|
this.notificationService.show(
|
||||||
|
NotificationType.success,
|
||||||
|
$localize`Cluster expansion was successful`
|
||||||
|
);
|
||||||
|
this.router.navigate(['/dashboard']);
|
||||||
|
})
|
||||||
|
)
|
||||||
|
)
|
||||||
|
.subscribe({
|
||||||
|
error: (error) => error.preventDefault()
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!this.stepsToSkip['Create OSDs']) {
|
||||||
|
if (this.driveGroup) {
|
||||||
|
const user = this.authStorageService.getUsername();
|
||||||
|
this.driveGroup.setName(`dashboard-${user}-${_.now()}`);
|
||||||
|
this.driveGroups.push(this.driveGroup.spec);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.simpleDeployment) {
|
||||||
|
const title = this.deploymentOption?.options[this.selectedOption['option']].title;
|
||||||
|
const trackingId = $localize`${title} deployment`;
|
||||||
this.taskWrapper
|
this.taskWrapper
|
||||||
.wrapTaskAroundCall({
|
.wrapTaskAroundCall({
|
||||||
task: new FinishedTask('osd/' + URLVerbs.CREATE, {
|
task: new FinishedTask('osd/' + URLVerbs.CREATE, {
|
||||||
tracking_id: trackingId
|
tracking_id: trackingId
|
||||||
}),
|
}),
|
||||||
call: this.osdService.create(this.driveGroups, trackingId)
|
call: this.osdService.create([this.selectedOption], trackingId, 'predefined')
|
||||||
})
|
})
|
||||||
.subscribe({
|
.subscribe({
|
||||||
error: (error) => error.preventDefault(),
|
error: (error) => error.preventDefault(),
|
||||||
complete: () => {
|
complete: () => {
|
||||||
this.submitAction.emit();
|
this.submitAction.emit();
|
||||||
this.osdService.osdDevices = [];
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
} else {
|
||||||
|
if (this.osdService.osdDevices['totalDevices'] > 0) {
|
||||||
|
this.driveGroup.setFeature('encrypted', this.selectedOption['encrypted']);
|
||||||
|
const trackingId = _.join(_.map(this.driveGroups, 'service_id'), ', ');
|
||||||
|
this.taskWrapper
|
||||||
|
.wrapTaskAroundCall({
|
||||||
|
task: new FinishedTask('osd/' + URLVerbs.CREATE, {
|
||||||
|
tracking_id: trackingId
|
||||||
|
}),
|
||||||
|
call: this.osdService.create(this.driveGroups, trackingId)
|
||||||
|
})
|
||||||
|
.subscribe({
|
||||||
|
error: (error) => error.preventDefault(),
|
||||||
|
complete: () => {
|
||||||
|
this.submitAction.emit();
|
||||||
|
this.osdService.osdDevices = [];
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -213,6 +222,12 @@ export class CreateClusterComponent implements OnInit, OnDestroy {
     }
   }

+  onSkip() {
+    const stepTitle = this.stepTitles[this.currentStep.stepIndex - 1];
+    this.stepsToSkip[stepTitle] = true;
+    this.onNextStep();
+  }
+
   showSubmitButtonLabel() {
     return !this.wizardStepsService.isLastStep()
       ? this.actionLabels.NEXT
@ -0,0 +1,11 @@
+<span *ngIf="!key; else key_value"
+      class="badge badge-{{value}}"
+      ngClass="{{value | colorClassFromText}}">
+  {{ value }}
+</span>
+
+<ng-template #key_value>
+  <span class="badge badge-background-primary badge-{{key}}-{{value}}">
+    {{ key }}: {{ value }}
+  </span>
+</ng-template>
@ -0,0 +1,25 @@
+import { ComponentFixture, TestBed } from '@angular/core/testing';
+
+import { CdLabelComponent } from './cd-label.component';
+import { ColorClassFromTextPipe } from './color-class-from-text.pipe';
+
+describe('CdLabelComponent', () => {
+  let component: CdLabelComponent;
+  let fixture: ComponentFixture<CdLabelComponent>;
+
+  beforeEach(async () => {
+    await TestBed.configureTestingModule({
+      declarations: [CdLabelComponent, ColorClassFromTextPipe]
+    }).compileComponents();
+  });
+
+  beforeEach(() => {
+    fixture = TestBed.createComponent(CdLabelComponent);
+    component = fixture.componentInstance;
+    fixture.detectChanges();
+  });
+
+  it('should create', () => {
+    expect(component).toBeTruthy();
+  });
+});
@ -0,0 +1,11 @@
+import { Component, Input } from '@angular/core';
+
+@Component({
+  selector: 'cd-label',
+  templateUrl: './cd-label.component.html',
+  styleUrls: ['./cd-label.component.scss']
+})
+export class CdLabelComponent {
+  @Input() key?: string;
+  @Input() value?: string;
+}
@ -0,0 +1,28 @@
+import { Pipe, PipeTransform } from '@angular/core';
+
+@Pipe({
+  name: 'colorClassFromText'
+})
+export class ColorClassFromTextPipe implements PipeTransform {
+  readonly cssClasses: string[] = [
+    'badge-cd-label-green',
+    'badge-cd-label-cyan',
+    'badge-cd-label-purple',
+    'badge-cd-label-light-blue',
+    'badge-cd-label-gold',
+    'badge-cd-label-light-green'
+  ];
+
+  transform(text: string): string {
+    let hash = 0;
+    let charCode = 0;
+    if (text) {
+      for (let i = 0; i < text.length; i++) {
+        charCode = text.charCodeAt(i);
+        // tslint:disable-next-line:no-bitwise
+        hash = Math.abs((hash << 5) - hash + charCode);
+      }
+    }
+    return this.cssClasses[hash % this.cssClasses.length];
+  }
+}
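For reference, the pipe above maps a label deterministically onto one of six badge classes through a simple rolling hash. The same mapping sketched in Python (it ignores JavaScript's 32-bit integer wrap-around, so very long labels could land on a different class):

CSS_CLASSES = [
    'badge-cd-label-green',
    'badge-cd-label-cyan',
    'badge-cd-label-purple',
    'badge-cd-label-light-blue',
    'badge-cd-label-gold',
    'badge-cd-label-light-green',
]


def color_class_from_text(text):
    # Rolling hash: hash = |hash*31 + charCode|, then modulo the class count.
    h = 0
    for ch in text or '':
        h = abs(h * 31 + ord(ch))
    return CSS_CLASSES[h % len(CSS_CLASSES)]


print(color_class_from_text('ssd'))  # same label always yields the same class
print(color_class_from_text('ssd'))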
@ -7114,6 +7114,8 @@ next:
       }
     }

+    bool resharding_underway = true;
+
     if (bucket_initable) {
       // we did not encounter an error, so let's work with the bucket
       RGWBucketReshard br(store, bucket_info, attrs,
@ -7122,14 +7124,17 @@ next:
     if (ret < 0) {
       if (ret == -EBUSY) {
         cerr << "There is ongoing resharding, please retry after " <<
-          store->ctx()->_conf.get_val<uint64_t>(
-          "rgw_reshard_bucket_lock_duration") <<
-          " seconds " << std::endl;
+          store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_bucket_lock_duration") <<
+          " seconds." << std::endl;
+        return -ret;
+      } else if (ret == -EINVAL) {
+        resharding_underway = false;
+        // we can continue and try to unschedule
       } else {
-        cerr << "Error canceling bucket " << bucket_name <<
-          " resharding: " << cpp_strerror(-ret) << std::endl;
+        cerr << "Error cancelling bucket \"" << bucket_name <<
+          "\" resharding: " << cpp_strerror(-ret) << std::endl;
+        return -ret;
       }
-      return ret;
     }
   }

@ -7138,13 +7143,22 @@ next:
     cls_rgw_reshard_entry entry;
     entry.tenant = tenant;
     entry.bucket_name = bucket_name;
-    //entry.bucket_id = bucket_id;

     ret = reshard.remove(dpp(), entry);
-    if (ret < 0 && ret != -ENOENT) {
-      cerr << "Error in updating reshard log with bucket " <<
-        bucket_name << ": " << cpp_strerror(-ret) << std::endl;
-      return ret;
+    if (ret == -ENOENT) {
+      if (!resharding_underway) {
+        cerr << "Error, bucket \"" << bucket_name <<
+          "\" is neither undergoing resharding nor scheduled to undergo "
+          "resharding." << std::endl;
+        return EINVAL;
+      } else {
+        // we cancelled underway resharding above, so we're good
+        return 0;
+      }
+    } else if (ret < 0) {
+      cerr << "Error in updating reshard log with bucket \"" <<
+        bucket_name << "\": " << cpp_strerror(-ret) << std::endl;
+      return -ret;
     }
   } // OPT_RESHARD_CANCEL

@ -130,8 +130,8 @@ std::string rgw_make_bucket_entry_name(const std::string& tenant_name,
  * Tenants are separated from buckets in URLs by a colon in S3.
  * This function is not to be used on Swift URLs, not even for COPY arguments.
  */
-void rgw_parse_url_bucket(const string &bucket, const string& auth_tenant,
+int rgw_parse_url_bucket(const string &bucket, const string& auth_tenant,
                           string &tenant_name, string &bucket_name) {

   int pos = bucket.find(':');
   if (pos >= 0) {
@ -142,10 +142,14 @@ void rgw_parse_url_bucket(const string &bucket, const string& auth_tenant,
      */
     tenant_name = bucket.substr(0, pos);
     bucket_name = bucket.substr(pos + 1);
+    if (bucket_name.empty()) {
+      return -ERR_INVALID_BUCKET_NAME;
+    }
   } else {
     tenant_name = auth_tenant;
     bucket_name = bucket;
   }
+  return 0;
 }

 /**
@ -49,9 +49,10 @@ extern int rgw_bucket_parse_bucket_key(CephContext *cct, const string& key,
 extern std::string rgw_make_bucket_entry_name(const std::string& tenant_name,
                                               const std::string& bucket_name);

-extern void rgw_parse_url_bucket(const string& bucket,
-                                 const string& auth_tenant,
-                                 string &tenant_name, string &bucket_name);
+[[nodiscard]] int rgw_parse_url_bucket(const std::string& bucket,
+                                       const std::string& auth_tenant,
+                                       std::string &tenant_name,
+                                       std::string &bucket_name);

 // this is used as a filter to RGWRados::cls_bucket_list_ordered; it
 // conforms to the type declaration of RGWRados::check_filter_t.
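rgw_parse_url_bucket() now reports failure instead of returning void, and an empty bucket name after the tenant colon is rejected. A toy Python version of the same parsing rule, with the error collapsed to a ValueError for brevity:

def parse_url_bucket(bucket, auth_tenant):
    """Split 'tenant:bucket'; reject an empty bucket name after the colon."""
    if ':' in bucket:
        tenant_name, bucket_name = bucket.split(':', 1)
        if not bucket_name:
            raise ValueError('invalid bucket name')  # stands in for -ERR_INVALID_BUCKET_NAME
        return tenant_name, bucket_name
    return auth_tenant, bucket


print(parse_url_bucket('acme:photos', 'default'))  # ('acme', 'photos')
print(parse_url_bucket('photos', 'acme'))          # ('acme', 'photos')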
@ -360,8 +360,7 @@ void dump(struct req_state* s)
   s->formatter->open_object_section("Error");
   if (!s->err.err_code.empty())
     s->formatter->dump_string("Code", s->err.err_code);
-  if (!s->err.message.empty())
-    s->formatter->dump_string("Message", s->err.message);
+  s->formatter->dump_string("Message", s->err.message);
   if (!s->bucket_name.empty()) // TODO: connect to expose_bucket
     s->formatter->dump_string("BucketName", s->bucket_name);
   if (!s->trans_id.empty()) // TODO: connect to expose_bucket or another toggle
@ -1037,7 +1037,9 @@ bool RGWCoroutine::drain_children(int num_cr_left,

 void RGWCoroutine::wakeup()
 {
-  stack->wakeup();
+  if (stack) {
+    stack->wakeup();
+  }
 }

 RGWCoroutinesEnv *RGWCoroutine::get_env() const
@ -5854,23 +5854,23 @@ void RGWPutLC::execute(optional_yield y)
   RGWXMLParser parser;
   RGWLifecycleConfiguration_S3 new_config(s->cct);

-  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
-  if (content_md5 == nullptr) {
-    op_ret = -ERR_INVALID_REQUEST;
-    s->err.message = "Missing required header for this request: Content-MD5";
-    ldpp_dout(this, 5) << s->err.message << dendl;
-    return;
-  }
+  // amazon says that Content-MD5 is required for this op specifically, but MD5
+  // is not a security primitive and FIPS mode makes it difficult to use. if the
+  // client provides the header we'll try to verify its checksum, but the header
+  // itself is no longer required
+  std::optional<std::string> content_md5_bin;

-  std::string content_md5_bin;
-  try {
-    content_md5_bin = rgw::from_base64(std::string_view(content_md5));
-  } catch (...) {
-    s->err.message = "Request header Content-MD5 contains character "
-                     "that is not base64 encoded.";
-    ldpp_dout(this, 5) << s->err.message << dendl;
-    op_ret = -ERR_BAD_DIGEST;
-    return;
+  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
+  if (content_md5 != nullptr) {
+    try {
+      content_md5_bin = rgw::from_base64(std::string_view(content_md5));
+    } catch (...) {
+      s->err.message = "Request header Content-MD5 contains character "
+                       "that is not base64 encoded.";
+      ldpp_dout(this, 5) << s->err.message << dendl;
+      op_ret = -ERR_BAD_DIGEST;
+      return;
+    }
   }

   if (!parser.init()) {
@ -5885,21 +5885,23 @@ void RGWPutLC::execute(optional_yield y)
   char* buf = data.c_str();
   ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;

-  MD5 data_hash;
-  // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
-  data_hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
-  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
-  data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
-  data_hash.Final(data_hash_res);
+  if (content_md5_bin) {
+    MD5 data_hash;
+    // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+    data_hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+    unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
+    data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
+    data_hash.Final(data_hash_res);

-  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
+    if (memcmp(data_hash_res, content_md5_bin->c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
     op_ret = -ERR_BAD_DIGEST;
     s->err.message = "The Content-MD5 you specified did not match what we received.";
     ldpp_dout(this, 5) << s->err.message
                        << " Specified content md5: " << content_md5
                        << ", calculated content md5: " << data_hash_res
                        << dendl;
     return;
+    }
   }

   if (!parser.parse(buf, data.length(), 1)) {
|
|||||||
entry.delete_multi_obj_meta.objects = std::move(ops_log_entries);
|
entry.delete_multi_obj_meta.objects = std::move(ops_log_entries);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void RGWDeleteMultiObj::wait_flush(optional_yield y,
|
||||||
|
boost::asio::deadline_timer *formatter_flush_cond,
|
||||||
|
std::function<bool()> predicate)
|
||||||
|
{
|
||||||
|
if (y && formatter_flush_cond) {
|
||||||
|
auto yc = y.get_yield_context();
|
||||||
|
while (!predicate()) {
|
||||||
|
boost::system::error_code error;
|
||||||
|
formatter_flush_cond->async_wait(yc[error]);
|
||||||
|
rgw_flush_formatter(s, s->formatter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_yield y,
|
||||||
|
boost::asio::deadline_timer *formatter_flush_cond)
|
||||||
|
{
|
||||||
|
std::string version_id;
|
||||||
|
RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
|
||||||
|
std::unique_ptr<rgw::sal::RGWObject> obj = bucket->get_object(o);
|
||||||
|
if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
|
||||||
|
auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
|
||||||
|
boost::none,
|
||||||
|
o.instance.empty() ?
|
||||||
|
rgw::IAM::s3DeleteObject :
|
||||||
|
rgw::IAM::s3DeleteObjectVersion,
|
||||||
|
ARN(obj->get_obj()));
|
||||||
|
if (identity_policy_res == Effect::Deny) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
rgw::IAM::Effect e = Effect::Pass;
|
||||||
|
rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
|
||||||
|
if (s->iam_policy) {
|
||||||
|
e = s->iam_policy->eval(s->env,
|
||||||
|
*s->auth.identity,
|
||||||
|
o.instance.empty() ?
|
||||||
|
rgw::IAM::s3DeleteObject :
|
||||||
|
rgw::IAM::s3DeleteObjectVersion,
|
||||||
|
ARN(obj->get_obj()),
|
||||||
|
princ_type);
|
||||||
|
}
|
||||||
|
if (e == Effect::Deny) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!s->session_policies.empty()) {
|
||||||
|
auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
|
||||||
|
boost::none,
|
||||||
|
o.instance.empty() ?
|
||||||
|
rgw::IAM::s3DeleteObject :
|
||||||
|
rgw::IAM::s3DeleteObjectVersion,
|
||||||
|
ARN(obj->get_obj()));
|
||||||
|
if (session_policy_res == Effect::Deny) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
|
||||||
|
//Intersection of session policy and identity policy plus intersection of session policy and bucket policy
|
||||||
|
if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
|
||||||
|
(session_policy_res != Effect::Allow || e != Effect::Allow)) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
|
||||||
|
//Intersection of session policy and identity policy plus bucket policy
|
||||||
|
if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
|
||||||
|
if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((identity_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
|
||||||
|
send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t obj_size = 0;
|
||||||
|
std::string etag;
|
||||||
|
|
||||||
|
if (!rgw::sal::RGWObject::empty(obj.get())) {
|
||||||
|
RGWObjState* astate = nullptr;
|
||||||
|
bool check_obj_lock = obj->have_instance() && bucket->get_info().obj_lock_enabled();
|
||||||
|
const auto ret = obj->get_obj_state(this, obj_ctx, *bucket, &astate, y, true);
|
||||||
|
|
||||||
|
if (ret < 0) {
|
||||||
|
if (ret == -ENOENT) {
|
||||||
|
// object maybe delete_marker, skip check_obj_lock
|
||||||
|
check_obj_lock = false;
|
||||||
|
} else {
|
||||||
|
// Something went wrong.
|
||||||
|
send_partial_response(o, false, "", ret, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
obj_size = astate->size;
|
||||||
|
etag = astate->attrset[RGW_ATTR_ETAG].to_str();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (check_obj_lock) {
|
||||||
|
ceph_assert(astate);
|
||||||
|
int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode);
|
||||||
|
if (object_lock_response != 0) {
|
||||||
|
send_partial_response(o, false, "", object_lock_response, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// make reservation for notification if needed
|
||||||
|
const auto versioned_object = s->bucket->versioning_enabled();
|
||||||
|
rgw::notify::reservation_t res(this, store, s, obj.get());
|
||||||
|
const auto event_type = versioned_object && obj->get_instance().empty() ?
|
||||||
|
rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete;
|
||||||
|
op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr);
|
||||||
|
if (op_ret < 0) {
|
||||||
|
send_partial_response(o, false, "", op_ret, formatter_flush_cond);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
obj->set_atomic(obj_ctx);
|
||||||
|
|
||||||
|
op_ret = obj->delete_object(this, obj_ctx, s->owner, s->bucket_owner, ceph::real_time(),
|
||||||
|
false, 0, version_id, y);
|
||||||
|
if (op_ret == -ENOENT) {
|
||||||
|
op_ret = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
send_partial_response(o, obj->get_delete_marker(), version_id, op_ret, formatter_flush_cond);
|
||||||
|
|
||||||
|
// send request to notification manager
|
||||||
|
const auto ret = rgw::notify::publish_commit(obj.get(), obj_size, ceph::real_clock::now(), etag, event_type, res, this);
|
||||||
|
if (ret < 0) {
|
||||||
|
ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
|
||||||
|
// too late to rollback operation, hence op_ret is not set here
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void RGWDeleteMultiObj::execute(optional_yield y)
|
void RGWDeleteMultiObj::execute(optional_yield y)
|
||||||
{
|
{
|
||||||
RGWMultiDelDelete *multi_delete;
|
RGWMultiDelDelete *multi_delete;
|
||||||
vector<rgw_obj_key>::iterator iter;
|
vector<rgw_obj_key>::iterator iter;
|
||||||
RGWMultiDelXMLParser parser;
|
RGWMultiDelXMLParser parser;
|
||||||
RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
|
uint32_t aio_count = 0;
|
||||||
|
const uint32_t max_aio = std::max<uint32_t>(1, s->cct->_conf->rgw_multi_obj_del_max_aio);
|
||||||
char* buf;
|
char* buf;
|
||||||
|
std::optional<boost::asio::deadline_timer> formatter_flush_cond;
|
||||||
|
if (y) {
|
||||||
|
formatter_flush_cond = std::make_optional<boost::asio::deadline_timer>(y.get_io_context());
|
||||||
|
}
|
||||||
|
|
||||||
buf = data.c_str();
|
buf = data.c_str();
|
||||||
if (!buf) {
|
if (!buf) {
|
||||||
@@ -7101,136 +7257,25 @@ void RGWDeleteMultiObj::execute(optional_yield y)
   for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end();
        ++iter) {
-    std::string version_id;
-    std::unique_ptr<rgw::sal::RGWObject> obj = bucket->get_object(*iter);
-    if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
-      auto identity_policy_res = eval_identity_or_session_policies(s->iam_user_policies, s->env,
-                                                                   boost::none,
-                                                                   iter->instance.empty() ?
-                                                                   rgw::IAM::s3DeleteObject :
-                                                                   rgw::IAM::s3DeleteObjectVersion,
-                                                                   ARN(obj->get_obj()));
-      if (identity_policy_res == Effect::Deny) {
-        send_partial_response(*iter, false, "", -EACCES);
-        continue;
-      }
-
-      rgw::IAM::Effect e = Effect::Pass;
-      rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
-      if (s->iam_policy) {
-        e = s->iam_policy->eval(s->env,
-                                *s->auth.identity,
-                                iter->instance.empty() ?
-                                rgw::IAM::s3DeleteObject :
-                                rgw::IAM::s3DeleteObjectVersion,
-                                ARN(obj->get_obj()),
-                                princ_type);
-      }
-      if (e == Effect::Deny) {
-        send_partial_response(*iter, false, "", -EACCES);
-        continue;
-      }
-
-      if (!s->session_policies.empty()) {
-        auto session_policy_res = eval_identity_or_session_policies(s->session_policies, s->env,
-                                                                    boost::none,
-                                                                    iter->instance.empty() ?
-                                                                    rgw::IAM::s3DeleteObject :
-                                                                    rgw::IAM::s3DeleteObjectVersion,
-                                                                    ARN(obj->get_obj()));
-        if (session_policy_res == Effect::Deny) {
-          send_partial_response(*iter, false, "", -EACCES);
-          continue;
-        }
-        if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
-          //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
-          if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
-              (session_policy_res != Effect::Allow || e != Effect::Allow)) {
-            send_partial_response(*iter, false, "", -EACCES);
-            continue;
-          }
-        } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
-          //Intersection of session policy and identity policy plus bucket policy
-          if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
-            send_partial_response(*iter, false, "", -EACCES);
-            continue;
-          }
-        } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
-          if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
-            send_partial_response(*iter, false, "", -EACCES);
-            continue;
-          }
-        }
-        send_partial_response(*iter, false, "", -EACCES);
-        continue;
-      }
-
-      if ((identity_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
-        send_partial_response(*iter, false, "", -EACCES);
-        continue;
-      }
-    }
-
-    uint64_t obj_size = 0;
-    std::string etag;
-
-    if (!rgw::sal::RGWObject::empty(obj.get())) {
-      RGWObjState* astate = nullptr;
-      bool check_obj_lock = obj->have_instance() && bucket->get_info().obj_lock_enabled();
-      const auto ret = obj->get_obj_state(this, obj_ctx, *bucket, &astate, s->yield, true);
-
-      if (ret < 0) {
-        if (ret == -ENOENT) {
-          // object maybe delete_marker, skip check_obj_lock
-          check_obj_lock = false;
-        } else {
-          // Something went wrong.
-          send_partial_response(*iter, false, "", ret);
-          continue;
-        }
-      } else {
-        obj_size = astate->size;
-        etag = astate->attrset[RGW_ATTR_ETAG].to_str();
-      }
-
-      if (check_obj_lock) {
-        ceph_assert(astate);
-        int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode);
-        if (object_lock_response != 0) {
-          send_partial_response(*iter, false, "", object_lock_response);
-          continue;
-        }
-      }
-    }
-
-    // make reservation for notification if needed
-    const auto versioned_object = s->bucket->versioning_enabled();
-    rgw::notify::reservation_t res(this, store, s, obj.get());
-    const auto event_type = versioned_object && obj->get_instance().empty() ?
-      rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete;
-    op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr);
-    if (op_ret < 0) {
-      send_partial_response(*iter, false, "", op_ret);
-      continue;
-    }
-
-    obj->set_atomic(obj_ctx);
-
-    op_ret = obj->delete_object(this, obj_ctx, s->owner, s->bucket_owner, ceph::real_time(),
-                                false, 0, version_id, s->yield);
-    if (op_ret == -ENOENT) {
-      op_ret = 0;
-    }
-
-    send_partial_response(*iter, obj->get_delete_marker(), version_id, op_ret);
-
-    // send request to notification manager
-    const auto ret = rgw::notify::publish_commit(obj.get(), obj_size, ceph::real_clock::now(), etag, event_type, res, this);
-    if (ret < 0) {
-      ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
-      // too late to rollback operation, hence op_ret is not set here
-    }
+    rgw_obj_key obj_key = *iter;
+    if (y) {
+      wait_flush(y, &*formatter_flush_cond, [&aio_count, max_aio] {
+        return aio_count < max_aio;
+      });
+      aio_count++;
+      spawn::spawn(y.get_yield_context(), [this, &y, &aio_count, obj_key, &formatter_flush_cond] (yield_context yield) {
+        handle_individual_object(obj_key, optional_yield { y.get_io_context(), yield }, &*formatter_flush_cond);
+        aio_count--;
+      });
+    } else {
+      handle_individual_object(obj_key, y, nullptr);
     }
   }
+
+  if (formatter_flush_cond) {
+    wait_flush(y, &*formatter_flush_cond, [this, n=multi_delete->objects.size()] {
+      return n == ops_log_entries.size();
+    });
+  }
+
   /* set the return code to zero, errors at this point will be
      dumped to the response */
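
The hunk above moves the per-key work into coroutines: each key is handed to handle_individual_object() on a freshly spawned coroutine, the number of coroutines in flight is capped by rgw_multi_obj_del_max_aio, and a shared deadline_timer is used as a wake-up signal rather than as a real timeout. The following is a standalone Boost.Asio sketch of that throttling pattern only; it is not Ceph code, and the names (wakeup, max_aio, aio_count), the 16-key workload and the timer intervals are invented for illustration. It builds against the classic boost::asio::spawn (link with boost_coroutine and boost_context).

    // throttle_sketch.cc -- illustrative only; mirrors the "cap in-flight
    // coroutines, wake the producer via timer.cancel()" pattern used above.
    #include <boost/asio/io_context.hpp>
    #include <boost/asio/spawn.hpp>
    #include <boost/asio/deadline_timer.hpp>
    #include <boost/date_time/posix_time/posix_time.hpp>
    #include <cstdint>
    #include <iostream>

    int main() {
      boost::asio::io_context io;
      boost::asio::deadline_timer wakeup(io);   // plays the role of formatter_flush_cond
      std::uint32_t aio_count = 0;
      const std::uint32_t max_aio = 4;          // stands in for rgw_multi_obj_del_max_aio

      boost::asio::spawn(io, [&](boost::asio::yield_context yield) {
        for (int i = 0; i < 16; ++i) {
          // wait for a free slot; finished workers cancel() the timer to wake us early
          while (aio_count >= max_aio) {
            wakeup.expires_from_now(boost::posix_time::milliseconds(50));
            boost::system::error_code ec;
            wakeup.async_wait(yield[ec]);
          }
          ++aio_count;
          boost::asio::spawn(io, [&, i](boost::asio::yield_context yield2) {
            boost::asio::deadline_timer work(io, boost::posix_time::milliseconds(10));
            boost::system::error_code ec;
            work.async_wait(yield2[ec]);        // stand-in for the per-object delete
            std::cout << "object " << i << " done\n";
            --aio_count;
            wakeup.cancel();                    // nudge the producer loop
          });
        }
      });

      io.run();
    }

In the real operation the signalling side is send_partial_response(), which cancels the shared timer after recording each partial result so that the parent coroutine can flush output between objects.
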
@@ -26,6 +26,7 @@
 #include <boost/utility/in_place_factory.hpp>
 #include <boost/function.hpp>
 #include <boost/container/flat_map.hpp>
+#include <boost/asio/deadline_timer.hpp>
 
 #include "common/armor.h"
 #include "common/mime.h"
@@ -1907,6 +1908,29 @@ public:
 
 
 class RGWDeleteMultiObj : public RGWOp {
+  /**
+   * Handles the deletion of an individual object and uses
+   * send_partial_response to record the outcome.
+   */
+  void handle_individual_object(const rgw_obj_key& o,
+                                optional_yield y,
+                                boost::asio::deadline_timer *formatter_flush_cond);
+
+  /**
+   * When the request is being executed in a coroutine, performs
+   * the actual formatter flushing and is responsible for the
+   * termination condition (when all partial object responses
+   * have been sent). Note that the formatter flushing must be handled
+   * on the coroutine that invokes the execute method vs. the
+   * coroutines that are spawned to handle individual objects because
+   * the flush logic uses a yield context that was captured
+   * and saved on the req_state vs. one that is passed on the stack.
+   * This is a no-op in the case where we're not executing as a coroutine.
+   */
+  void wait_flush(optional_yield y,
+                  boost::asio::deadline_timer *formatter_flush_cond,
+                  std::function<bool()> predicate);
+
 protected:
   std::vector<delete_multi_obj_entry> ops_log_entries;
   bufferlist data;
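
wait_flush()'s body is not shown in the hunks quoted in this commit, so the sketch below is only a plausible reading of the contract the comment above describes: the invoking coroutine parks on the shared timer, re-evaluates a predicate whenever another coroutine cancels that timer, and performs the flush work in between. It is plain Boost.Asio, not the shipped implementation; all names, the one-second fallback interval and the simulated workload are assumptions.

    // Standalone sketch (not Ceph code) of the wait_flush() contract.
    #include <boost/asio/io_context.hpp>
    #include <boost/asio/spawn.hpp>
    #include <boost/asio/deadline_timer.hpp>
    #include <boost/date_time/posix_time/posix_time.hpp>
    #include <functional>
    #include <iostream>

    // park the caller on `cond` until predicate() holds; cond.cancel() re-checks it
    void wait_flush_sketch(boost::asio::yield_context yield,
                           boost::asio::deadline_timer& cond,
                           std::function<bool()> predicate) {
      while (!predicate()) {
        cond.expires_from_now(boost::posix_time::seconds(1));  // fallback re-check
        boost::system::error_code ec;
        cond.async_wait(yield[ec]);
        // the real method would also flush the partially built formatter output here
      }
    }

    int main() {
      boost::asio::io_context io;
      boost::asio::deadline_timer cond(io);
      int done = 0, total = 5;

      for (int i = 0; i < total; ++i) {
        boost::asio::spawn(io, [&, i](boost::asio::yield_context y) {
          boost::asio::deadline_timer t(io, boost::posix_time::milliseconds(10 * (i + 1)));
          boost::system::error_code ec;
          t.async_wait(y[ec]);          // simulated per-object work
          ++done;
          cond.cancel();                // like send_partial_response() signalling
        });
      }

      boost::asio::spawn(io, [&](boost::asio::yield_context y) {
        wait_flush_sketch(y, cond, [&] { return done == total; });
        std::cout << "all " << done << " partial responses sent\n";
      });

      io.run();
    }

Re-using a timer as a cancellable wait keeps the signalling inside the io_context that already drives the request, instead of introducing a separate condition-variable primitive.
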
@@ -1917,7 +1941,6 @@ protected:
   bool bypass_perm;
   bool bypass_governance_mode;
 
-
 public:
   RGWDeleteMultiObj() {
     quiet = false;
@@ -1925,6 +1948,7 @@ public:
     bypass_perm = true;
     bypass_governance_mode = false;
   }
+
   int verify_permission(optional_yield y) override;
   void pre_exec() override;
   void execute(optional_yield y) override;
@@ -1932,8 +1956,9 @@ public:
   virtual int get_params(optional_yield y) = 0;
   virtual void send_status() = 0;
   virtual void begin_response() = 0;
-  virtual void send_partial_response(rgw_obj_key& key, bool delete_marker,
-                                     const string& marker_version_id, int ret) = 0;
+  virtual void send_partial_response(const rgw_obj_key& key, bool delete_marker,
+                                     const std::string& marker_version_id, int ret,
+                                     boost::asio::deadline_timer *formatter_flush_cond) = 0;
   virtual void end_response() = 0;
   const char* name() const override { return "multi_object_delete"; }
   RGWOpType get_type() override { return RGW_OP_DELETE_MULTI_OBJ; }
@@ -5187,7 +5187,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi
   store->remove_rgw_head_obj(op);
 
   auto& ioctx = ref.pool.ioctx();
-  r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield);
+  r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, y);
 
   /* raced with another operation, object state is indeterminate */
   const bool need_invalidate = (r == -ECANCELED);
@@ -7298,9 +7298,6 @@ int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp,
 
   /* update olh object */
   r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
-  if (r == -ECANCELED) {
-    r = 0;
-  }
   if (r < 0) {
     ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
     return r;
@@ -7611,6 +7608,12 @@ int RGWRados::follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& buc
 
     int ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj);
     if (ret < 0) {
+      if (ret == -ECANCELED) {
+        // In this context, ECANCELED means that the OLH tag changed in either the bucket index entry or the OLH object.
+        // If the OLH tag changed, it indicates that a previous OLH entry was removed since this request started. We
+        // return ENOENT to indicate that the OLH object was removed.
+        ret = -ENOENT;
+      }
       return ret;
     }
   }
@@ -7664,7 +7667,7 @@ int RGWRados::raw_obj_stat(const DoutPrefixProvider *dpp,
     op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, NULL);
   }
   bufferlist outbl;
-  r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield);
+  r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, y);
 
   if (epoch) {
     *epoch = ref.pool.ioctx().get_last_version();
@@ -8558,14 +8561,12 @@ int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp,
 
   // add the next unique candidate, or return false if we reach the end
   auto next_candidate = [] (CephContext *cct, ShardTracker& t,
-                            std::map<std::string, size_t>& candidates,
+                            std::multimap<std::string, size_t>& candidates,
                             size_t tracker_idx) {
-    while (!t.at_end()) {
-      if (candidates.emplace(t.entry_name(), tracker_idx).second) {
-        return;
-      }
-      t.advance(); // skip duplicate common prefixes
+    if (!t.at_end()) {
+      candidates.emplace(t.entry_name(), tracker_idx);
     }
+    return;
   };
 
   // one tracker per shard requested (may not be all shards)
@@ -8587,8 +8588,10 @@ int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp,
   // (key=candidate, value=index into results_trackers); as we consume
   // entries from shards, we replace them with the next entries in the
   // shards until we run out
-  std::map<std::string, size_t> candidates;
+  std::multimap<std::string, size_t> candidates;
   size_t tracker_idx = 0;
+  std::vector<size_t> vidx;
+  vidx.reserve(shard_list_results.size());
   for (auto& t : results_trackers) {
     // it's important that the values in the map refer to the index
     // into the results_trackers vector, which may not be the same
@@ -8650,16 +8653,31 @@ int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp,
     }
 
     // refresh the candidates map
-    candidates.erase(candidates.begin());
-    tracker.advance();
-    next_candidate(cct, tracker, candidates, tracker_idx);
-    if (tracker.at_end() && tracker.is_truncated()) {
+    vidx.clear();
+    bool need_to_stop = false;
+    auto range = candidates.equal_range(name);
+    for (auto i = range.first; i != range.second; ++i) {
+      vidx.push_back(i->second);
+    }
+    candidates.erase(range.first, range.second);
+    for (auto idx : vidx) {
+      auto& tracker_match = results_trackers.at(idx);
+      tracker_match.advance();
+      next_candidate(cct, tracker_match, candidates, idx);
+      if (tracker_match.at_end() && tracker_match.is_truncated()) {
+        need_to_stop = true;
+        break;
+      }
+    }
+    if (need_to_stop) {
       // once we exhaust one shard that is truncated, we need to stop,
       // as we cannot be certain that one of the next entries needs to
      // come from that shard; S3 and swift protocols allow returning
       // fewer than what was requested
+      ldpp_dout(dpp, 10) << __PRETTY_FUNCTION__ <<
+        ": stopped accumulating results at count=" << count <<
+        ", dirent=\"" << dirent.key <<
+        "\", because its shard is truncated and exhausted" << dendl;
       break;
     }
   } // while we haven't provided requested # of result entries
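
The two cls_bucket_list_ordered() hunks above switch the candidates map to a std::multimap because, during the ordered merge across index shards, the same entry name can sit at the head of several shards at once, and every one of those shards has to be advanced in the same round. The short self-contained program below illustrates that equal_range pattern; the shard contents are invented and this is not RGW code.

    // multimap_merge_sketch.cc -- illustrative k-way merge with duplicate heads.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::vector<std::string>> shards = {
        {"a", "c", "d"},
        {"a", "b"},        // "a" also heads shard 1 -- a unique-key std::map would drop it
        {"b", "e"},
      };
      std::vector<std::size_t> pos(shards.size(), 0);

      // key = candidate entry, value = shard index; duplicate keys are allowed
      std::multimap<std::string, std::size_t> candidates;
      for (std::size_t i = 0; i < shards.size(); ++i) {
        if (!shards[i].empty())
          candidates.emplace(shards[i][0], i);
      }

      while (!candidates.empty()) {
        const std::string name = candidates.begin()->first;
        std::cout << name << '\n';          // emit the smallest candidate once

        // advance *every* shard whose head equals `name`, like the vidx loop above
        auto range = candidates.equal_range(name);
        std::vector<std::size_t> vidx;
        for (auto i = range.first; i != range.second; ++i)
          vidx.push_back(i->second);
        candidates.erase(range.first, range.second);
        for (std::size_t idx : vidx) {
          if (++pos[idx] < shards[idx].size())
            candidates.emplace(shards[idx][pos[idx]], idx);
        }
      }
      return 0;
    }

With a unique-key map, inserting a duplicate head simply fails and the affected shard had to be advanced immediately to compensate; collecting the matching shard indices through equal_range lets them all advance together once the entry has been consumed.
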
@@ -3925,9 +3925,11 @@ void RGWDeleteMultiObj_ObjStore_S3::begin_response()
   rgw_flush_formatter(s, s->formatter);
 }
 
-void RGWDeleteMultiObj_ObjStore_S3::send_partial_response(rgw_obj_key& key,
+void RGWDeleteMultiObj_ObjStore_S3::send_partial_response(const rgw_obj_key& key,
                                                           bool delete_marker,
-                                                          const string& marker_version_id, int ret)
+                                                          const string& marker_version_id,
+                                                          int ret,
+                                                          boost::asio::deadline_timer *formatter_flush_cond)
 {
   if (!key.empty()) {
     delete_multi_obj_entry ops_log_entry;
@@ -3973,7 +3975,11 @@ void RGWDeleteMultiObj_ObjStore_S3::send_partial_response(rgw_obj_key& key,
     }
 
     ops_log_entries.push_back(std::move(ops_log_entry));
-    rgw_flush_formatter(s, s->formatter);
+    if (formatter_flush_cond) {
+      formatter_flush_cond->cancel();
+    } else {
+      rgw_flush_formatter(s, s->formatter);
+    }
   }
 }
 
@@ -4694,9 +4700,11 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y)
 {
   struct req_init_state *t = &s->init_state;
 
-  rgw_parse_url_bucket(t->url_bucket, s->user->get_tenant(),
+  int ret = rgw_parse_url_bucket(t->url_bucket, s->user->get_tenant(),
                        s->bucket_tenant, s->bucket_name);
+  if (ret) {
+    return ret;
+  }
   if (s->auth.identity->get_identity_type() == TYPE_ROLE) {
     s->bucket_tenant = s->auth.identity->get_role_tenant();
   }
@@ -4704,7 +4712,6 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y)
   ldpp_dout(s, 10) << "s->object=" << s->object
            << " s->bucket=" << rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name) << dendl;
 
-  int ret;
   ret = rgw_validate_tenant_name(s->bucket_tenant);
   if (ret)
     return ret;
@@ -4721,8 +4728,11 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y)
   } else {
     auth_tenant = s->user->get_tenant();
   }
-  rgw_parse_url_bucket(t->src_bucket, auth_tenant,
+  ret = rgw_parse_url_bucket(t->src_bucket, auth_tenant,
                        s->src_tenant_name, s->src_bucket_name);
+  if (ret) {
+    return ret;
+  }
   ret = rgw_validate_tenant_name(s->src_tenant_name);
   if (ret)
     return ret;
@@ -492,8 +492,9 @@ public:
   int get_params(optional_yield y) override;
   void send_status() override;
   void begin_response() override;
-  void send_partial_response(rgw_obj_key& key, bool delete_marker,
-                             const string& marker_version_id, int ret) override;
+  void send_partial_response(const rgw_obj_key& key, bool delete_marker,
+                             const std::string& marker_version_id, int ret,
+                             boost::asio::deadline_timer *formatter_flush_cond) override;
   void end_response() override;
 };
 
@@ -734,6 +734,7 @@ class FSTop(object):
                                          f'{speed}', curses.A_DIM)
                     else:
                         # display 0th element from metric tuple
+                        metrics_dict[fs_name][client_id][self.items(item)] = m[0]
                         self.fsstats.addstr(y_coord, coord[0], f'{m[0]}', curses.A_DIM)
                 else:
                     self.fsstats.addstr(y_coord, coord[0], "N/A", curses.A_DIM)