mirror of https://git.proxmox.com/git/ceph.git (synced 2025-08-06 22:28:43 +00:00)
update sources to v12.1.0
commit 31f18b776d (parent 40152f1e46)

Makefile (4 lines changed)
@@ -1,8 +1,8 @@
RELEASE=5.0

PACKAGE=ceph
-VER=12.0.3
-DEBREL=pve3
+VER=12.1.0
+DEBREL=pve1

SRCDIR=ceph
BUILDSRC=${SRCDIR}-${VER}

ceph/.gitmodules (vendored, 3 lines changed)
@@ -55,3 +55,6 @@
[submodule "src/blkin"]
	path = src/blkin
	url = https://github.com/ceph/blkin
+[submodule "src/rapidjson"]
+	path = src/rapidjson
+	url = https://github.com/ceph/rapidjson
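
The new rapidjson submodule is fetched like any other git submodule. A hedged sketch of a fresh checkout (standard git commands; note that in this packaging repo the upstream sources live under ceph/, so exact paths may differ):

    git clone https://git.proxmox.com/git/ceph.git
    cd ceph
    git submodule update --init --recursive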

ceph/.mailmap
@@ -59,6 +59,8 @@ Chen Dihao <tobeg3oogle@gmail.com>
Chendi Xue <chendi.xue@intel.com>
Chendi Xue <chendi.xue@intel.com> <xuechendi@gmail.com>
Cheng Cheng <ccheng.leo@gmail.com>
Chu Hua-Rong <hrchu@cht.com.tw>
Chu Hua-Rong <hrchu@cht.com.tw> <petertc@gmail.com>
Chris Holcombe <chris.holcombe@nebula.com> <xfactor973@gmail.com>
Christian Brunner <christian@brunner-muc.de> <chb@muc.de>
Christian Marie <pingu@anchor.net.au> <christian@ponies.io>
@@ -140,9 +142,11 @@ Haomai Wang <haomai@xsky.com> <haomai@xsky.io>
Haomai Wang <haomai@xsky.com> <yuyuyu101@163.com>
Harry Harrington <git-harry@live.co.uk>
Hazem Amara <hazem.amara@telecom-bretagne.eu> <hazem@hazem-Inspiron-3537.(none)>
He Chuang <hechuang@xsky.com>
Henry C Chang <henry_c_chang@tcloudcomputing.com> <henry.cy.chang@gmail.com>
Hervé Rousseau <hroussea@cern.ch>
Holger Macht <hmacht@suse.de> <holger@homac.de>
Hongtong Liu <hongtong.liu@istuary.com>
Hong Zhangoole <hong.zhangoole@gmail.com>
Huamin Chen <hchen@redhat.com>
Huang Jun <hjwsm1989@gmail.com>
@@ -174,6 +178,8 @@ Jenkins <jenkins@ceph.com> <jenkins-build@trusty-small-unique--68a2c286-dc75-466
Jenkins <jenkins@ceph.com> <jenkins-build@trusty-small-unique--e64d6d03-305d-46bd-9a2c-9b546e06937e.localdomain>
Jenkins <jenkins@ceph.com> Jenkins Build Slave User <jenkins-build@trusty-small-unique--a7f82f5f-8832-433e-a632-928924f47e04.localdomain>
Jenkins <jenkins@ceph.com> Jenkins Build Slave User <ceph-release-team@redhat.com>
Jesse Williamson <jesse.williamson@suse.com>
Jesse Williamson <jesse.williamson@suse.com> <jwilliamson@suse.de>
Jiang Heng <jiangheng0511@gmail.com>
Jiantao He <hejiantao5@gmail.com>
Jian Wen <wenjian@letv.com> <wenjianhn@gmail.com>
@@ -206,6 +212,7 @@ Jonas Keidel <jonas@jonas-keidel.de>
Jordan Dorne <jordan.dorne@gmail.com>
Josh Durgin <jdurgin@redhat.com>
Josh Durgin <jdurgin@redhat.com> <joshd@redhat.com>
Josh Durgin <jdurgin@redhat.com> <jduring@redhat.com>
Josh Durgin <josh.durgin@inktank.com> <joshd@hq.newdream.net>
Josh Durgin <josh.durgin@inktank.com> <josh.durgin@dreamhost.com>
Kacper Kowalik <xarthisius@gentoo.org>
@@ -218,11 +225,14 @@ Kiseleva Alyona <akiselyova@mirantis.com>
Kongming Wu <wu.kongming@h3c.com>
Lan De <lan.de3@zte.com.cn>
Laszlo Boszormenyi <gcs@debian.hu>
Leo Zhang <nguzcf@gmail.com>
Luo Kexue <luo.kexue@zte.com.cn>
Luo Runbing <runsisi@hust.edu.cn>
Luo Runbing <runsisi@zte.com.cn>
Li Hongjie <lihongjie@cmss.chinamobile.com>
Li Wang <li.wang@kylin-cloud.com> <liwang@ubuntukylin.com>
Linfei Hou <hou.linfei@h3c.com>
Liu Hong <liuhong@cmss.chinamobile.com>
Liu Peiyan <liu.peiyang@h3c.com>
Li Tianqing <tianqing@unitedstack.com>
Liu Yang <yippeetry@gmail.com>
@@ -353,7 +363,8 @@ Shehbaz Jaffer <shehbaz.jaffer@mail.utoronto.ca> <shehbazjaffer007@gmail.com>
Shinobu Kinjo <shinobu@redhat.com> <shinobu@linux.com>
Shinobu Kinjo <shinobu@redhat.com> <ceph@ceph-stack.redhat.com>
Shishir Gowda <shishir.gowda@sandisk.com>
shiqi <m13913886148@gmail.com> <1454927420@qq.com>
Shiqi <m13913886148@gmail.com>
Shiqi <m13913886148@gmail.com> <1454927420@qq.com>
Shu, Xinxin <xinxin.shu@intel.com>
Shun Song <song.shun3@zte.com.cn>
Shun Song <song.shun3@zte.com.cn> <songshun134@126.com>
@@ -387,6 +398,7 @@ Tobias Suckow <tobias@suckow.biz>
Tommi Virtanen <tv@inktank.com> <tommi.virtanen@dreamhost.com>
Tommi Virtanen <tv@inktank.com> <tv@eagain.net>
Tommi Virtanen <tv@inktank.com> <tv@hq.newdream.net>
Tone Zhang <tone.zhang@linaro.org>
Travis Rhoden <trhoden@redhat.com> <trhoden@gmail.com>
Tyler Brekke <tyler.brekke@inktank.com>
Uday Mullangi <udaymjl@gmail.com>
@@ -434,6 +446,7 @@ Yannick Atchy Dalama <yannick.atchy.dalama@gmail.com>
Yankun Li <liyankun@unitedstack.com> <YankunLi@users.noreply.github.com>
Yan, Zheng <zheng.z.yan@intel.com>
Yan, Zheng <zheng.z.yan@intel.com> <ukernel@gmail.com>
Yanchun Bai <yanchun.bai@istuary.com>
Yehua Chen <chen.yehua@h3c.com>
Yehua Chen <chen.yehua@h3c.com> <root@ubuntu1.com>
Yehuda Sadeh <yehuda@inktank.com> <yehdua@inktank.com>
@@ -452,6 +465,7 @@ Yehuda Sadeh <ysadehwe@redhat.com> <yehuda@redhat.com.>
Yongqiang He <he.yongqiang@h3c.com>
Yongyue Sun <abioy.sun@gmail.com>
You Ji <youji@ebay.com>
Yunfei Guan <yunfei.guan@xtaotech.com>
Yuan Zhou <yuan.zhou@intel.com> <dunk007@gmail.com>
Yunchuan Wen <yunchuan.wen@kylin-cloud.com> <yunchuanwen@ubuntukylin.com>
Yuri Weinstein <yuriw@redhat.com> <yweinste@redhat.com>

ceph/.organizationmap
@@ -23,6 +23,7 @@
#  9 27 TCloud Computing <contact@tcloudcomputing.com>
# 10 22 GNU <contact@gnu.org>
#
99Cloud Inc <contact@99cloud.net> Yu Shengzuo <yu.shengzuo@99cloud.net>
Acaleph <contact@acale.ph> Alistair Israel <aisrael@gmail.com>
Alcatel Lucent <contact@alcatel-lucent.com> Joseph McDonald <joseph.mcdonald@alcatel-lucent.com>
Alcatel Lucent <contact@alcatel-lucent.com> Ker Liu <ker.liu@alcatel-lucent.com>
@@ -66,14 +67,15 @@ China Mobile <contact@chinamobile.com> donglinpeng <donglinpeng@cmss.chinamobile
China Mobile <contact@chinamobile.com> Gui Hecheng <guihecheng@cmss.chinamobile.com>
China Mobile <contact@chinamobile.com> Guo Zhandong <guozhandong@cmss.chinamobile.com>
China Mobile <contact@chinamobile.com> Jing Wenjun <jingwenjun@cmss.chinamobile.com>
-China Mobile <contact@chinamobile.com> lihongjie <lihongjie@cmss.chinamobile.com>
+China Mobile <contact@chinamobile.com> Li Hongjie <lihongjie@cmss.chinamobile.com>
+China Mobile <contact@chinamobile.com> Liu Hong <liuhong@cmss.chinamobile.com>
China Mobile <contact@chinamobile.com> lvshuhua <lvshuhua@cmss.chinamobile.com>
China Mobile <contact@chinamobile.com> Zhang Shaowen <zhangshaowen@cmss.chinamobile.com>
China Mobile <contact@chinamobile.com> wangzhengyong <wangzhengyong@cmss.chinamobile.com>
Chinac <info@chinac.com> Huan Zhang <zhanghuan@chinac.com>
Chinac <info@chinac.com> Xinxin Shu <shuxinxin@chinac.com>
Choopa, LLC <contact@choopa.com> Adam Twardowski <adam.twardowski@gmail.com>
-Chunghwa Telecom <contact@cht.com.tw> hrchu <hrchu@cht.com.tw>
+Chunghwa Telecom <contact@cht.com.tw> Chu Hua-Rong <hrchu@cht.com.tw>
CISCO <contact@cisco.com> Johnu George <johnugeo@cisco.com>
CISCO <contact@cisco.com> Kai Zhang <zakir.exe@gmail.com>
CISCO <contact@cisco.com> Roland Mechler <rmechler@cisco.com>
@@ -206,6 +208,7 @@ Hostplex Hosting <contact@hostplex.net> Andras Elso <elso.andras@gmail.com>
HP <contact@hp.com> Blaine Gardner <blaine.gardner@hp.com>
HP <contact@hp.com> Joe Handzik <joseph.t.handzik@hp.com>
+Huazhong University of Science and Technology <contact@hust.edu.cn> Luo Runbing <runsisi@hust.edu.cn>
IBM <contact@IBM.com> Andrew Solomon <asolomon@us.ibm.com>
IBM <contact@IBM.com> Michel Normand <normand@linux.vnet.ibm.com>
IBM <contact@IBM.com> Samuel Matzek <smatzek@us.ibm.com>
ICT <contact@ict.ac.cn> Zhang Huan <zhanghuan@ict.ac.cn>
@@ -214,6 +217,7 @@ Igalia <contact@igalia.com> Javier M. Mellid <jmunhoz@igalia.com>
Igalia <contact@igalia.com> Juan A. Suarez Romero <jasuarez@igalia.com>
Imagination Technologies Ltd. <contact@imgtec.com> Alistair Strachan <alistair.strachan@imgtec.com>
iNic <contact@inic.no> Bjørnar Ness <bjornar.ness@gmail.com>
+Indraprastha Institute of Information Technology, Delhi <contact@iiitd.ac.in> Vedant Nanda <vedant15114@iiitd.ac.in>
Indiana University <contact@indiana.edu> Jayashree Candadai <jayaajay@indiana.edu>
Inktank <contact@inktank.com> Alexandre Marangone <alexandre.marangone@inktank.com>
Inktank <contact@inktank.com> Alex Elder <elder@inktank.com>
@@ -278,6 +282,7 @@ Iron Systems Inc. <info@ironsystems.com> Harpreet Dhillon <harpreet@ironsystems.
Istuary Innovation Group <contact@istuary.com> Hongtong Liu <hongtong.liu@istuary.com>
Istuary Innovation Group <contact@istuary.com> Pan Liu <pan.liu@istuary.com>
Istuary Innovation Group <contact@istuary.com> Tang Jin <tang.jin@istuary.com>
Istuary Innovation Group <contact@istuary.com> Yanchun Bai <yanchun.bai@istuary.com>
IT Refined <contact@itrefined.com> Ron Allred <rallred@itrefined.com>
IWeb <contact@iweb.com> David Moreau Simard <dmsimard@iweb.com>
Johannes Gutenberg-Universität Mainz <contact@uni-mainz.de> Marcel Lauhoff <lauhoff@uni-mainz.de>
@@ -296,6 +301,7 @@ LETV <contact@letv.com> Ji Chen <insomnia@139.com>
LETV <contact@letv.com> Wu Xingyi <wuxingyi@letv.com>
Linaro <contact@linaro.org> Steve Capper <steve.capper@linaro.org>
Linaro <contact@linaro.org> wei xiao <wei.xiao@linaro.org>
+Linaro <contact@linaro.org> Tone Zhang <tone.zhang@linaro.org>
Linaro <contact@linaro.org> Yazen Ghannam <yazen.ghannam@linaro.org>
Linaro <contact@linaro.org> Yibo Cai <yibo.cai@linaro.org>
Los Alamos National Laboratory <contact@lanl.gov> Esteban Molina-Estolano <eestolan@lanl.gov>
@@ -339,6 +345,7 @@ Pacific Northwest National Laboratory <contact@pnl.gov> Evan Felix <evan.felix@p
Pacific Northwest National Laboratory <contact@pnl.gov> Scott Devoid <devoid@anl.gov>
Piston Cloud Computing <info@pistoncloud.com> Mike Lundy <mike@fluffypenguin.org>
Pogoapp <contact@pogoapp.com> Paul Meserve <paul@pogodan.com>
+Proxmox Server Solutions GmbH <office@proxmox.com> Fabian Grünbichler <f.gruenbichler@proxmox.com>
Purdue University <contact@purdue.edu> Mike Shuey <shuey@purdue.edu>
Quadrature Capital Limited <info@quadraturecapital.com> Jim Wright <jim@quadraturecapital.com>
Quantum Corporation <info@quantum.com> Bassam Tabbara <bassam.tabbara@quantum.com>
@@ -481,14 +488,17 @@ SUSE <contact@suse.com> João Eduardo Luís <joao@suse.de>
SUSE <contact@suse.com> Karl Eichwalder <ke@suse.de>
SUSE <contact@suse.com> Karol Mroz <kmroz@suse.com>
SUSE <contact@suse.com> Kapil Sharma <ksharma@suse.com>
SUSE <contact@suse.com> Mohamad Gebai <mgebai@suse.com>
SUSE <contact@suse.com> Michal Koutný <mkoutny@suse.com>
SUSE <contact@suse.com> Nathan Cutler <ncutler@suse.com>
SUSE <contact@suse.com> Owen Synge <osynge@suse.com>
SUSE <contact@suse.com> Ricardo Dias <rdias@suse.com>
SUSE <contact@suse.com> Sven Seeberg <sseebergelverfeldt@suse.com>
SUSE <contact@suse.com> Thorsten Behrens <tbehrens@suse.com>
SUSE <contact@suse.com> Tim Serong <tserong@suse.com>
SWITCH <contact@switch.ch> Jens-Christian Fischer <jens-christian.fischer@switch.ch>
SWITCH <contact@switch.ch> Simon Leinen <simon.leinen@switch.ch>
T2Cloud <contact@t2cloud.net> Leo Zhang <nguzcf@gmail.com>
TCloud Computing <contact@tcloudcomputing.com> CC Lien <cc_lien@tcloudcomputing.com>
TCloud Computing <contact@tcloudcomputing.com> Henry C Chang <henry_c_chang@tcloudcomputing.com>
TCloud Computing <contact@tcloudcomputing.com> Herb Shiu <herb_shiu@tcloudcomputing.com>
@@ -509,6 +519,7 @@ The University of Arizona <contact@arizona.edu> James Ryan Cresawn <jrcresawn@gm
Time Warner Cable Inc. <contact@twcable.com> Bryan Stillwell <bryan.stillwell@twcable.com>
Trendy Tech <contact@trendytech.com.cn> shiqi <m13913886148@gmail.com>
+Trendy Tech <contact@trendytech.com.cn> Lei Zhang <243290414@qq.com>
Uber Technologies Inc. <contact@uber.com> Henrik Korkuc <henrik@uber.com>
Ubuntu Kylin <contact@ubuntukylin.com> Min Chen <minchen@ubuntukylin.com>
UMCloud <contact@umcloud.com> Jiaying Ren <mikulely@gmail.com>
UMCloud <contact@umcloud.com> Rongze Zhu <zrzhit@gmail.com>
@@ -624,7 +635,6 @@ Unaffiliated <no@organization.net> koleosfuscus <koleosfuscus@yahoo.com>
Unaffiliated <no@organization.net> Kyr Shatskyy <kyrylo.shatskyy@gmail.com>
Unaffiliated <no@organization.net> Laurent Guerby <laurent@guerby.net>
Unaffiliated <no@organization.net> Lee Revell <rlrevell@gmail.com>
-Unaffiliated <no@organization.net> Leo Zhang <nguzcf@gmail.com>
Unaffiliated <no@organization.net> Lucas Fantinel <lucas.fantinel@gmail.com>
Unaffiliated <no@organization.net> Коренберг Марк <socketpair@gmail.com>
Unaffiliated <no@organization.net> Matt Richards <mattjrichards@gmail.com>
@@ -644,6 +654,7 @@ Unaffiliated <no@organization.net> oddomatik <bandrus+github@gmail.com>
Unaffiliated <no@organization.net> Oleh Prypin <oleh@pryp.in>
Unaffiliated <no@organization.net> optimistyzy <optimistyzy@gmail.com>
Unaffiliated <no@organization.net> Pascal Bach <pasci.bach@gmail.com>
Unaffiliated <no@organization.net> Patrick Dinnen <pdinnen@gmail.com>
Unaffiliated <no@organization.net> Patrick Donnelly <batrick@batbytes.com>
Unaffiliated <no@organization.net> Peter Maloney <peter.maloney@yahoo.ca>
Unaffiliated <no@organization.net> Petertc Chu <petertc.chu@gmail.com>
@@ -699,6 +710,7 @@ Unaffiliated <no@organization.net> Xu Biao <xubiao.codeyz@gmail.com>
Unaffiliated <no@organization.net> Yang Honggang <joseph.yang@xtaotech.com>
Unaffiliated <no@organization.net> Yann Dupont <yann@objoo.org>
Unaffiliated <no@organization.net> Yannick Atchy Dalama <yannick.atchy.dalama@gmail.com>
Unaffiliated <no@organization.net> Yao Zongyou <yaozongyou@vip.qq.com>
Unaffiliated <no@organization.net> Yatin Kumbhare <yatinkumbhare@gmail.com>
Unaffiliated <no@organization.net> YiQiang Chen <cyqsign@163.com>
Unaffiliated <no@organization.net> Yongyue Sun <abioy.sun@gmail.com>
@@ -735,6 +747,7 @@ University of Maryland <contact@umd.edu> Liam Monahan <liam@umiacs.umd.edu>
University of Maryland <contact@umd.edu> Padraig O'Sullivan <posulliv@umd.edu>
+University of Toronto <contact@mail.utoronto.ca> Shehbaz Jaffer <shehbaz.jaffer@mail.utoronto.ca>
University of Utah <contact@utah.edu> Xing Lin <xinglin@cs.utah.edu>
Virtuozzo <contact@virtuozzo.com> Andrey Parfenov <aparfenov@virtuozzo.com>
VRT <contact@vrt.com.au> Stuart Longland <stuartl@vrt.com.au>
Walmart Labs <contact@walmartlabs.com> Pavan Rallabhandi <PRallabhandi@walmartlabs.com>
Walmart Labs <contact@walmartlabs.com> Sirisha Guduru <SGuduru@walmartlabs.com>
@@ -748,11 +761,13 @@ X-ION <contact@x-ion.de> Mouad Benchchaoui <m.benchchaoui@x-ion.de>
X-ION <contact@x-ion.de> Stephan Renatus <s.renatus@x-ion.de>
-XSky <contact@xsky.com> hechuang <hechuang@xsky.com>
XSky <contact@xsky.com> Haomai Wang <haomai@xsky.com>
+XSky <contact@xsky.com> He Chuang <hechuang@xsky.com>
XSky <contact@xsky.com> Min Chen <chenmin@xsky.com>
XSky <contact@xsky.com> Mingxin Liu <mingxin@xsky.com>
XSky <contact@xsky.com> Tianshan Qu <tianshan@xsky.com>
XSky <contact@xsky.com> Xinze Chi <xinze@xsky.com>
XSky <contact@xsky.com> Zhiqiang Wang <zhiqiang@xsky.com>
XTao Co. Ltd. <contact@xtaotech.com> Yunfei Guan <yunfei.guan@xtaotech.com>
Yahoo! <contact@yahoo-inc.com> Guang Yang <yguang@yahoo-inc.com>
Yahoo! <contact@yahoo-inc.com> Haifeng Liu <haifeng@yahoo-inc.com>
Yahoo! <contact@yahoo-inc.com> Lei Dong <leidong@yahoo-inc.com>

ceph/AUTHORS (15 lines changed)
@@ -1,17 +1,14 @@
Maintainer
----------

Sage Weil <sage@redhat.com>


Component Technical Leads
-------------------------

Core, RADOS - Josh Durgin <jdurgin@redhat.com>
RBD - Jason Dillaman <dillaman@redhat.com>
RBD (kernel) - Ilya Dryomov <idryomov@redhat.com>
RGW - Yehuda Sadeh <yehuda@redhat.com>
-      Matt Benjamin <mbenjamin@redhat.com>
+      Matt Benjamin <mbenjami@redhat.com>
CephFS - John Spray <jspray@redhat.com>
CephFS (kernel) - Yan, Zheng <zyan@redhat.com>
Deployment - Alfredo Deza <adeza@redhat.com>
@@ -19,10 +16,18 @@ Teuthology - Zack Cerza <zack@redhat.com>
Calamari - Gregory Meno <gmeno@redhat.com>
Chef Cookbook - Guilhem Lettron <guilhem@lettron.fr>

Release Manager
---------------
Abhishek Lekshmanan <abhishek@suse.com>

Backport Team
-------------
Nathan Cutler <ncutler@suse.cz>
Shinobu Kinjo <shinobu@redhat.com>
Abhishek Lekshmanan <abhishek@suse.com>

Contributors
------------

For a complete contributor list:

  git shortlog -sn

ceph/CMakeLists.txt
@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 2.8.11)

project(ceph)
-set(VERSION 12.0.3)
+set(VERSION 12.1.0)

if(POLICY CMP0046)
  # Tweak policies (this one disables "missing" dependency warning)
@@ -22,7 +22,15 @@ endif()
if(POLICY CMP0065)
  cmake_policy(SET CMP0065 NEW)
endif()

+if(POLICY CMP0051)
+  # cmake 3.1 and higher include generator expressions in SOURCES property.
+  # in BuildBoost.cmake, get_target_property(<var> <target> SOURCES) is used
+  # to retrieve the source files of a target. in that case, we are only
+  # interested in the *source* files. and i don't want to bother stripping off
+  # the TARGET_OBJECTS elements from the returned SOURCES. so let's stick with
+  # the old behavior now.
+  cmake_policy(SET CMP0051 OLD)
+endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/")

if(CMAKE_SYSTEM_NAME MATCHES "Linux")
@@ -105,11 +113,10 @@ CHECK_FUNCTION_EXISTS(pthread_set_name_np HAVE_PTHREAD_SET_NAME_NP)
CHECK_FUNCTION_EXISTS(pthread_setname_np HAVE_PTHREAD_SETNAME_NP)
CHECK_FUNCTION_EXISTS(pthread_getname_np HAVE_PTHREAD_GETNAME_NP)
CHECK_FUNCTION_EXISTS(eventfd HAVE_EVENTFD)
CHECK_FUNCTION_EXISTS(getprogname HAVE_GETPROGNAME)

CHECK_INCLUDE_FILES("inttypes.h" HAVE_INTTYPES_H)
CHECK_INCLUDE_FILES("linux/types.h" HAVE_LINUX_TYPES_H)
CHECK_INCLUDE_FILES("linux/version.h" HAVE_LINUX_VERSION_H)
CHECK_INCLUDE_FILES("stdint.h" HAVE_STDINT_H)
CHECK_INCLUDE_FILES("arpa/nameser_compat.h" HAVE_ARPA_NAMESER_COMPAT_H)
CHECK_INCLUDE_FILES("sys/mount.h" HAVE_SYS_MOUNT_H)
CHECK_INCLUDE_FILES("sys/param.h" HAVE_SYS_PARAM_H)
@@ -218,6 +225,12 @@ if(WITH_SPDK)
  set(HAVE_SPDK TRUE)
endif(WITH_SPDK)

+option(WITH_PMEM "Enable PMEM" OFF)
+if(WITH_PMEM)
+  find_package(pmem REQUIRED)
+  set(HAVE_PMEM ${PMEM_FOUND})
+endif(WITH_PMEM)
+
# needs mds and? XXX
option(WITH_LIBCEPHFS "libcephfs client library" ON)

@@ -251,14 +264,14 @@ if(WITH_LEVELDB)
  find_file(HAVE_LEVELDB_FILTER_POLICY leveldb/filter_policy.h PATHS ${LEVELDB_INCLUDE_DIR})
endif(WITH_LEVELDB)

find_package(atomic_ops REQUIRED)
message(STATUS "${ATOMIC_OPS_LIBRARIES}")
if(NOT ${ATOMIC_OPS_FOUND})
  set(NO_ATOMIC_OPS 1)
endif(NOT ${ATOMIC_OPS_FOUND})

find_package(snappy REQUIRED)

option(WITH_LZ4 "LZ4 compression support" OFF)
if(WITH_LZ4)
  find_package(LZ4 REQUIRED)
  set(HAVE_LZ4 ${LZ4_FOUND})
endif(WITH_LZ4)

#if allocator is set on command line make sure it matches below strings
if(ALLOCATOR)
  if(${ALLOCATOR} MATCHES "tcmalloc(_minimal)?")
@@ -345,7 +358,7 @@ endif(WITH_BLKIN)

#option for RGW
option(WITH_RADOSGW "Rados Gateway is enabled" ON)
-option(WITH_RADOSGW_FCGI_FRONTEND "Rados Gateway's FCGI frontend is enabled" ON)
+option(WITH_RADOSGW_FCGI_FRONTEND "Rados Gateway's FCGI frontend is enabled" OFF)
option(WITH_RADOSGW_BEAST_FRONTEND "Rados Gateway's Beast frontend is enabled" ON)
if(WITH_RADOSGW)
  find_package(EXPAT REQUIRED)
@@ -437,6 +450,8 @@ if(${WITH_LTTNG})
  endif()
endif(${WITH_LTTNG})

+option(WITH_OSD_INSTRUMENT_FUNCTIONS OFF)
+
#option for Babeltrace
option(HAVE_BABELTRACE "Babeltrace libraries are enabled" ON)
if(${HAVE_BABELTRACE})
@@ -513,90 +528,42 @@ endif()
# Boost
option(WITH_SYSTEM_BOOST "require and build with system Boost" OFF)

# Boost::thread depends on Boost::atomic, so list it explicitly.
set(BOOST_COMPONENTS
-  container thread system regex random program_options date_time iostreams coroutine context)
+  atomic thread system regex random program_options date_time iostreams
+  filesystem)
+set(BOOST_HEADER_COMPONENTS container)

if(WITH_MGR)
  list(APPEND BOOST_COMPONENTS python)
endif()

if(WITH_SYSTEM_BOOST)
  if(WITH_RADOSGW_BEAST_FRONTEND)
    list(APPEND BOOST_COMPONENTS coroutine context)
  endif()

  set(Boost_USE_MULTITHREADED ON)
  # require minimally the bundled version
  if(ENABLE_SHARED)
    set(Boost_USE_STATIC_LIBS OFF)
  else()
    set(Boost_USE_STATIC_LIBS ON)
  endif()
  find_package(Boost 1.61 COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
else()
  set(BOOST_CFLAGS "-fPIC -w") # check on arm, etc <---XXX
  set(BOOST_J 1 CACHE STRING
    "max jobs for Boost build") # override w/-DBOOST_J=<n>
  message(STATUS "BUILDING Boost Libraries at j ${BOOST_J}")
  # 1. prep w/required components
  set(BOOST_SOURCE_DIR "${PROJECT_SOURCE_DIR}/src/boost")
  set(BOOST_PREFIX "${PROJECT_BINARY_DIR}/boost")
  set(BOOST_BUILD "${PROJECT_BINARY_DIR}/boost-build")
  string(REPLACE ";" "," BOOST_WITH_LIBS "${BOOST_COMPONENTS}")
  execute_process(COMMAND "./bootstrap.sh"
    "--prefix=${BOOST_PREFIX}"
    "--with-libraries=${BOOST_WITH_LIBS}"
    WORKING_DIRECTORY ${BOOST_SOURCE_DIR})
  set(BOOST_ROOT "${BOOST_PREFIX}")
  set(b2 ./b2
    --build-dir=${BOOST_BUILD} -j${BOOST_J})
  if(CMAKE_VERBOSE_MAKEFILE)
    list(APPEND b2 -d1)
  else()
    list(APPEND b2 -d0)
  endif()
  list(APPEND b2
    variant=release link=static threading=multi cxxflags=${BOOST_CFLAGS})
  if(NOT CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR)
    # we are crosscompiling
    if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
      set(b2_cc gcc)
    elseif(CMAKE_CXX_COMPILER_ID STREQUAL Clang)
      set(b2_cc clang)
    else()
      message(SEND_ERROR "unknown compiler: ${CMAKE_CXX_COMPILER_ID}")
    endif()
    # edit the config.jam so, b2 will be able to use the specified toolset
    execute_process(
      COMMAND
        sed -i
        "s|using ${b2_cc} ;|using ${b2_cc} : ${CMAKE_SYSTEM_PROCESSOR} : ${CMAKE_CXX_COMPILER} ;|"
        ${PROJECT_SOURCE_DIR}/src/boost/project-config.jam)
    # use ${CMAKE_SYSTEM_PROCESSOR} as the version identifier of compiler
    list(APPEND b2 toolset=${b2_cc}-${CMAKE_SYSTEM_PROCESSOR})
  endif()
  # 2. install headers
  execute_process(COMMAND
    ${b2}
    headers
    WORKING_DIRECTORY ${BOOST_SOURCE_DIR})
  # 3. build and install libs
  execute_process(COMMAND
    ${b2}
    install
    WORKING_DIRECTORY ${BOOST_SOURCE_DIR})
  # 4. set hints for FindBoost.cmake
  set(Boost_USE_STATIC_LIBS ON)
  set(Boost_NO_SYSTEM_PATHS ON)
  include_directories(BEFORE ${BOOST_PREFIX}/include)
  # fixup for CheckIncludeFileCXX
  set(HAVE_BOOST_ASIO_COROUTINE ON)

  set(BOOST_ROOT ${BOOST_PREFIX})
  set(Boost_NO_SYSTEM_PATHS ON)
  include(BuildBoost)
  build_boost(1.63
    COMPONENTS ${BOOST_COMPONENTS} ${BOOST_HEADER_COMPONENTS})
  include_directories(BEFORE SYSTEM ${Boost_INCLUDE_DIRS})
endif()

set(Boost_USE_MULTITHREADED ON)

# require minimally the bundled version
find_package(Boost 1.61 COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
include_directories(SYSTEM ${PROJECT_BINARY_DIR}/include)

CHECK_INCLUDE_FILE_CXX("boost/asio/coroutine.hpp" HAVE_BOOST_ASIO_COROUTINE)

find_package(Threads REQUIRED)

option(WITH_SELINUX "build SELinux policy" OFF)
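
In short, the bundled Boost (1.63) is now built through the new BuildBoost module, while system Boost (>= 1.61) stays opt-in. A hedged configure sketch using only the flags visible in this hunk:

    cmake -DWITH_SYSTEM_BOOST=ON ..   # link against a distro-provided Boost 1.61+
    cmake -DBOOST_J=4 ..              # or: build the bundled Boost with 4 parallel jobs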

@@ -636,5 +603,5 @@ add_tags(ctags
  SRC_DIR src
  TAG_FILE tags
  EXCLUDE_OPTS ${CTAG_EXCLUDES}
-  EXCLUDES "*.js")
+  EXCLUDES "*.js" "*.css")
add_custom_target(tags DEPENDS ctags)

@@ -30,10 +30,6 @@ Files: src/common/bloom_filter.hpp
Copyright: Copyright (C) 2000 Arash Partow <arash@partow.net>
License: Boost Software License, Version 1.0

-Files: m4/acx_pthread.m4
-Copyright: Steven G. Johnson <stevenj@alum.mit.edu>
-License: GPLWithACException
-
Files: src/common/crc32c_intel*:
Copyright:
 Copyright 2012-2013 Intel Corporation All Rights Reserved.

ceph/PendingReleaseNotes
@@ -1,36 +1,7 @@
>= 12.0.0
------
* The original librados rados_objects_list_open (C) and objects_begin
  (C++) object listing API, deprecated in Hammer, has finally been
  removed. Users of this interface must update their software to use
  either the rados_nobjects_list_open (C) and nobjects_begin (C++) API or
  the new rados_object_list_begin (C) and object_list_begin (C++) API
  before updating the client-side librados library to Luminous.

  Object enumeration (via any API) with the latest librados version
  and pre-Hammer OSDs is no longer supported. Note that no in-tree
  Ceph services rely on object enumeration via the deprecated APIs, so
  only external librados users might be affected.

  The newest (and recommended) rados_object_list_begin (C) and
  object_list_begin (C++) API is only usable on clusters with the
  SORTBITWISE flag enabled (Jewel and later). (Note that this flag is
  required to be set before upgrading beyond Jewel.)
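  As a hedged sketch, the flag can be checked and, if absent, set from the
  command line before upgrading (standard commands; the cluster state shown
  is hypothetical)::

    ceph osd dump | grep flags    # look for 'sortbitwise' in the flag list
    ceph osd set sortbitwise      # must be set before upgrading beyond Jewel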
* CephFS clients without the 'p' flag in their authentication capability
  string will no longer be able to set quotas or any layout fields. This
  flag previously only restricted modification of the pool and namespace
  fields in layouts.
* CephFS directory fragmentation (large directory support) is enabled
  by default on new filesystems. To enable it on existing filesystems
  use "ceph fs set <fs_name> allow_dirfrags".
* CephFS will generate a health warning if you have fewer standby daemons
  than it thinks you wanted. By default this will be 1 if you ever had
  a standby, and 0 if you did not. You can customize this using
  ``ceph fs set <fs> standby_count_wanted <number>``. Setting it
  to zero will effectively disable the health check.
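  For example, to disable the warning entirely on a filesystem (the name
  ``cephfs`` is a hypothetical placeholder)::

    ceph fs set cephfs standby_count_wanted 0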
* The "ceph mds tell ..." command has been removed. It is superceded
|
||||
by "ceph tell mds.<id> ..."
|
||||
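  For example, assuming a daemon command such as ``session ls`` and a
  hypothetical daemon name ``mds.a``, the new form is::

    ceph tell mds.a session ls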
* The "journaler allow split entries" config setting has been removed.
|
||||
* The 'apply' mode of cephfs-journal-tool has been removed
|
||||
|
||||
12.0.0
|
||||
------
|
||||
@ -121,3 +92,93 @@
|
||||
* The 'rados df' JSON output now prints numeric values as numbers instead of
|
||||
strings.
|
||||
|
||||
* There was a bug introduced in Jewel (#19119) that broke the mapping behavior
|
||||
when an "out" OSD that still existed in the CRUSH map was removed with 'osd rm'.
|
||||
This could result in 'misdirected op' and other errors. The bug is now fixed,
|
||||
but the fix itself introduces the same risk because the behavior may vary between
|
||||
clients and OSDs. To avoid problems, please ensure that all OSDs are removed
|
||||
from the CRUSH map before deleting them. That is, be sure to do::
|
||||
|
||||
ceph osd crush rm osd.123
|
||||
|
||||
before::
|
||||
|
||||
ceph osd rm osd.123
|
||||
|
||||
12.0.2
|
||||
------
|
||||
|
||||
* The original librados rados_objects_list_open (C) and objects_begin
|
||||
(C++) object listing API, deprecated in Hammer, has finally been
|
||||
removed. Users of this interface must update their software to use
|
||||
either the rados_nobjects_list_open (C) and nobjects_begin (C++) API or
|
||||
the new rados_object_list_begin (C) and object_list_begin (C++) API
|
||||
before updating the client-side librados library to Luminous.
|
||||
|
||||
Object enumeration (via any API) with the latest librados version
|
||||
and pre-Hammer OSDs is no longer supported. Note that no in-tree
|
||||
Ceph services rely on object enumeration via the deprecated APIs, so
|
||||
only external librados users might be affected.
|
||||
|
||||
The newest (and recommended) rados_object_list_begin (C) and
|
||||
object_list_begin (C++) API is only usable on clusters with the
|
||||
SORTBITWISE flag enabled (Jewel and later). (Note that this flag is
|
||||
required to be set before upgrading beyond Jewel.)
|
||||
* CephFS clients without the 'p' flag in their authentication capability
|
||||
string will no longer be able to set quotas or any layout fields. This
|
||||
flag previously only restricted modification of the pool and namespace
|
||||
fields in layouts.
|
||||
* CephFS directory fragmentation (large directory support) is enabled
|
||||
by default on new filesystems. To enable it on existing filesystems
|
||||
use "ceph fs set <fs_name> allow_dirfrags".
|
||||
* CephFS will generate a health warning if you have fewer standby daemons
|
||||
than it thinks you wanted. By default this will be 1 if you ever had
|
||||
a standby, and 0 if you did not. You can customize this using
|
||||
``ceph fs set <fs> standby_count_wanted <number>``. Setting it
|
||||
to zero will effectively disable the health check.
|
||||
* The "ceph mds tell ..." command has been removed. It is superceded
|
||||
by "ceph tell mds.<id> ..."
|
||||
|
||||

12.1.0
------

* The ``mon_osd_max_op_age`` option has been renamed to
  ``mon_osd_warn_op_age`` (default: 32 seconds), to indicate we
  generate a warning at this age. There is also a new
  ``mon_osd_err_op_age_ratio`` that is expressed as a multiple of
  ``mon_osd_warn_op_age`` (default: 128, for roughly 60 minutes) to
  control when an error is generated.
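  The renamed options can be confirmed on a running monitor; a hedged
  sketch (the daemon name ``mon.a`` is a hypothetical placeholder)::

    ceph daemon mon.a config get mon_osd_warn_op_age
    ceph daemon mon.a config get mon_osd_err_op_age_ratio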
* The default maximum size for a single RADOS object has been reduced from
  100GB to 128MB. The 100GB limit was completely impractical, while the
  128MB limit is a bit high but not unreasonable. If you have an
  application written directly to librados that is using objects larger than
  128MB you may need to adjust ``osd_max_object_size``.
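  For example, the effective limit can be inspected on a running OSD (the
  daemon name ``osd.0`` is a hypothetical placeholder)::

    ceph daemon osd.0 config get osd_max_object_size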
* The semantics of the 'rados ls' and librados object listing
  operations have always been a bit confusing in that "whiteout"
  objects (which logically don't exist and will return ENOENT if you
  try to access them) are included in the results. Previously
  whiteouts only occurred in cache tier pools. In luminous, logically
  deleted but snapshotted objects now result in a whiteout object, and
  as a result they will appear in 'rados ls' results, even though
  trying to read such an object will result in ENOENT. The 'rados
  listsnaps' operation can be used in such a case to enumerate which
  snapshots are present.

  This may seem a bit strange, but is less strange than having a
  deleted-but-snapshotted object not appear at all and be completely
  hidden from librados's ability to enumerate objects. Future
  versions of Ceph will likely include an alternative object
  enumeration interface that makes it more natural and efficient to
  enumerate all objects along with their snapshot and clone metadata.
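  A hedged sketch (the pool name ``mypool`` and object name ``myobj`` are
  hypothetical placeholders)::

    rados -p mypool ls               # whiteout objects appear in this listing
    rados -p mypool listsnaps myobj  # enumerate the snapshots that still exist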
* The deprecated 'crush_ruleset' property has finally been removed; please use
  'crush_rule' instead for the 'osd pool get ...' and 'osd pool set ..' commands.
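  For example (the pool name ``mypool`` and rule name ``replicated_rule``
  are hypothetical placeholders)::

    ceph osd pool get mypool crush_rule
    ceph osd pool set mypool crush_rule replicated_rule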
* The 'osd pool default crush replicated ruleset' option has been
  removed and replaced by the 'osd pool default crush rule' option.
  By default it is -1, which means the mon will pick the first
  replicated rule in the CRUSH map for replicated pools. Erasure
  coded pools have rules that are automatically created for them if they are
  not specified at pool creation time.

@@ -1,7 +1,7 @@
# Contributor: John Coyle <dx9err@gmail.com>
# Maintainer: John Coyle <dx9err@gmail.com>
pkgname=ceph
-pkgver=12.0.3
+pkgver=12.1.0
pkgrel=0
pkgdesc="Ceph is a distributed object store and file system"
pkgusers="ceph"
@@ -38,7 +38,6 @@ makedepends="
	keyutils-dev
	leveldb-dev
	libaio-dev
-	libatomic_ops-dev
	libedit-dev
	libressl-dev
	libtirpc-dev
@@ -64,7 +63,7 @@ makedepends="
	xmlstarlet
	yasm
	"
-source="ceph-12.0.3.tar.bz2"
+source="ceph-12.1.0.tar.bz2"
subpackages="
	$pkgname-base
	$pkgname-common
@@ -117,7 +116,7 @@ _sysconfdir=/etc
_udevrulesdir=/etc/udev/rules.d
_python_sitelib=/usr/lib/python2.7/site-packages

-builddir=$srcdir/ceph-12.0.3
+builddir=$srcdir/ceph-12.1.0

build() {
	export CEPH_BUILD_VIRTUALENV=$builddir
@@ -167,7 +166,6 @@ package() {
		|| return 1

	chmod 0644 $pkgdir$_docdir/ceph/sample.ceph.conf || return 1
	chmod 0644 $pkgdir$_docdir/ceph/sample.fetch_config || return 1

	# udev rules
	install -m 0644 -D udev/50-rbd.rules $pkgdir$_udevrulesdir/50-rbd.rules || return 1
@@ -193,7 +191,6 @@ base() {
		xfsprogs
		"

	_pkg $_docdir/ceph sample.ceph.conf sample.fetch_config
	_pkg $_bindir crushtool monmaptool osdmaptool ceph-run ceph-detect-init
	_pkg $_sbindir ceph-create-keys mount.ceph
	_pkg $_libexecdir/ceph ceph_common.sh
@@ -292,7 +289,7 @@ radosgw() {
	pkgdesc="Rados REST gateway which implements Amazon's S3 and OpenStack's Swift APIs."
	depends="ceph-common"

-	_pkg $_bindir radosgw radosgw-admin radosgw-token radosgw-object-expirer
+	_pkg $_bindir radosgw radosgw-admin radosgw-token radosgw-es radosgw-object-expirer
	mkdir -p $subpkgdir$_localstatedir/lib/ceph/radosgw
	}
@@ -301,7 +298,7 @@ osd() {
	depends="ceph-base parted gptfdisk"

	_pkg $_bindir ceph-clsinfo ceph-bluefs-tool ceph-objectstore-tool ceph-osd
-	_pkg $_sbindir ceph-disk ceph-disk-udev
+	_pkg $_sbindir ceph-disk
	_pkg $_libexecdir/ceph ceph-osd-prestart.sh
	_pkg $_udevrulesdir 60-ceph-by-parttypeuuid.rules 95-ceph-osd.rules
	install -m 750 -o $_ceph_uid -g $_ceph_gid -d \

@@ -38,7 +38,6 @@ makedepends="
	keyutils-dev
	leveldb-dev
	libaio-dev
-	libatomic_ops-dev
	libedit-dev
	libressl-dev
	libtirpc-dev
@@ -167,7 +166,6 @@ package() {
		|| return 1

	chmod 0644 $pkgdir$_docdir/ceph/sample.ceph.conf || return 1
	chmod 0644 $pkgdir$_docdir/ceph/sample.fetch_config || return 1

	# udev rules
	install -m 0644 -D udev/50-rbd.rules $pkgdir$_udevrulesdir/50-rbd.rules || return 1
@@ -193,7 +191,6 @@ base() {
		xfsprogs
		"

	_pkg $_docdir/ceph sample.ceph.conf sample.fetch_config
	_pkg $_bindir crushtool monmaptool osdmaptool ceph-run ceph-detect-init
	_pkg $_sbindir ceph-create-keys mount.ceph
	_pkg $_libexecdir/ceph ceph_common.sh
@@ -292,7 +289,7 @@ radosgw() {
	pkgdesc="Rados REST gateway which implements Amazon's S3 and OpenStack's Swift APIs."
	depends="ceph-common"

-	_pkg $_bindir radosgw radosgw-admin radosgw-token radosgw-object-expirer
+	_pkg $_bindir radosgw radosgw-admin radosgw-token radosgw-es radosgw-object-expirer
	mkdir -p $subpkgdir$_localstatedir/lib/ceph/radosgw
	}
@@ -301,7 +298,7 @@ osd() {
	depends="ceph-base parted gptfdisk"

	_pkg $_bindir ceph-clsinfo ceph-bluefs-tool ceph-objectstore-tool ceph-osd
-	_pkg $_sbindir ceph-disk ceph-disk-udev
+	_pkg $_sbindir ceph-disk
	_pkg $_libexecdir/ceph ceph-osd-prestart.sh
	_pkg $_udevrulesdir 60-ceph-by-parttypeuuid.rules 95-ceph-osd.rules
	install -m 750 -o $_ceph_uid -g $_ceph_gid -d \

ceph/ceph.spec (380 lines changed)
(file diff suppressed because it is too large)

ceph/cmake/modules/BuildBoost.cmake (new file, 184 lines)
@@ -0,0 +1,184 @@
# This module builds Boost. It sets the following variables:
#
# Boost_FOUND : boolean            - system has Boost
# Boost_LIBRARIES : list(filepath) - the libraries needed to use Boost
# Boost_INCLUDE_DIRS : list(path)  - the Boost include directories
#
# Following hints are respected
#
# Boost_USE_STATIC_LIBS : boolean (default: OFF)
# Boost_USE_MULTITHREADED : boolean (default: OFF)
# BOOST_J: integer (default 1)

function(do_build_boost version)
  cmake_parse_arguments(Boost_BUILD "" "" COMPONENTS ${ARGN})
  set(boost_features "variant=release")
  if(Boost_USE_MULTITHREADED)
    list(APPEND boost_features "threading=multi")
  else()
    list(APPEND boost_features "threading=single")
  endif()
  if(Boost_USE_STATIC_LIBS)
    list(APPEND boost_features "link=static")
  else()
    list(APPEND boost_features "link=shared")
  endif()
  if(CMAKE_SIZEOF_VOID_P EQUAL 8)
    list(APPEND boost_features "address-model=64")
  else()
    list(APPEND boost_features "address-model=32")
  endif()
  set(BOOST_CXXFLAGS "-fPIC -w") # check on arm, etc <---XXX
  list(APPEND boost_features "cxxflags=${BOOST_CXXFLAGS}")

  string(REPLACE ";" "," boost_with_libs "${Boost_BUILD_COMPONENTS}")
  # build b2 and prepare the project-config.jam for boost
  set(configure_command
    ./bootstrap.sh --prefix=<INSTALL_DIR>
    --with-libraries=${boost_with_libs})

  set(b2 ./b2)
  if(BOOST_J)
    message(STATUS "BUILDING Boost Libraries at j ${BOOST_J}")
    list(APPEND b2 -j${BOOST_J})
  endif()
  if(CMAKE_VERBOSE_MAKEFILE)
    list(APPEND b2 -d1)
  else()
    list(APPEND b2 -d0)
  endif()

  if(NOT CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR)
    # we are crosscompiling
    if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
      set(b2_cc gcc)
    elseif(CMAKE_CXX_COMPILER_ID STREQUAL Clang)
      set(b2_cc clang)
    else()
      message(SEND_ERROR "unknown compiler: ${CMAKE_CXX_COMPILER_ID}")
    endif()
    # edit the config.jam so, b2 will be able to use the specified toolset
    execute_process(
      COMMAND
        sed -i
        "s|using ${b2_cc} ;|using ${b2_cc} : ${CMAKE_SYSTEM_PROCESSOR} : ${CMAKE_CXX_COMPILER} ;|"
        ${PROJECT_SOURCE_DIR}/src/boost/project-config.jam)
    # use ${CMAKE_SYSTEM_PROCESSOR} as the version identifier of compiler
    list(APPEND b2 toolset=${b2_cc}-${CMAKE_SYSTEM_PROCESSOR})
  endif()

  set(build_command
    ${b2} headers stage
    #"--buildid=ceph" # changes lib names--can omit for static
    ${boost_features})
  set(install_command
    ${b2} install)
  set(boost_root_dir "${CMAKE_BINARY_DIR}/boost")
  if(EXISTS "${PROJECT_SOURCE_DIR}/src/boost/libs/config/include/boost/config.hpp")
    message(STATUS "boost already in src")
    set(source_dir
      SOURCE_DIR "${PROJECT_SOURCE_DIR}/src/boost")
  elseif(version VERSION_GREATER 1.63)
    message(FATAL_ERROR "Unknown BOOST_REQUESTED_VERSION: ${version}")
  else()
    message(STATUS "boost will be downloaded from sf.net")
    set(boost_version 1.63.0)
    set(boost_md5 1c837ecd990bb022d07e7aab32b09847)
    string(REPLACE "." "_" boost_version_underscore ${boost_version} )
    set(boost_url http://downloads.sourceforge.net/project/boost/boost/${boost_version}/boost_${boost_version_underscore}.tar.bz2)
    set(source_dir
      URL ${boost_url}
      URL_MD5 ${boost_md5})
    if(CMAKE_VERSION VERSION_GREATER 3.0)
      list(APPEND source_dir DOWNLOAD_NO_PROGRESS 1)
    endif()
  endif()
  # build all components in a single shot
  include(ExternalProject)
  ExternalProject_Add(Boost
    ${source_dir}
    CONFIGURE_COMMAND CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} ${configure_command}
    BUILD_COMMAND CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} ${build_command}
    BUILD_IN_SOURCE 1
    INSTALL_COMMAND ${install_command}
    PREFIX "${boost_root_dir}")
endfunction()

macro(build_boost version)
  do_build_boost(version ${ARGN})
  ExternalProject_Get_Property(Boost install_dir)
  set(Boost_INCLUDE_DIRS ${install_dir}/include)
  set(Boost_INCLUDE_DIR ${install_dir}/include)
  # create the directory so cmake won't complain when looking at the imported
  # target
  file(MAKE_DIRECTORY ${Boost_INCLUDE_DIRS})
  cmake_parse_arguments(Boost_BUILD "" "" COMPONENTS ${ARGN})
  foreach(c ${Boost_BUILD_COMPONENTS})
    string(TOUPPER ${c} upper_c)
    if(Boost_USE_STATIC_LIBS)
      add_library(Boost::${c} STATIC IMPORTED)
    else()
      add_library(Boost::${c} SHARED IMPORTED)
    endif()
    add_dependencies(Boost::${c} Boost)
    if(Boost_USE_STATIC_LIBS)
      set(Boost_${upper_c}_LIBRARY
        ${install_dir}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}boost_${c}${CMAKE_STATIC_LIBRARY_SUFFIX})
    else()
      set(Boost_${upper_c}_LIBRARY
        ${install_dir}/lib/${CMAKE_SHARED_LIBRARY_PREFIX}boost_${c}${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
    set_target_properties(Boost::${c} PROPERTIES
      INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}"
      IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
      IMPORTED_LOCATION "${Boost_${upper_c}_LIBRARY}")
    list(APPEND Boost_LIBRARIES ${Boost_${upper_c}_LIBRARY})
  endforeach()

  # for header-only libraries
  if(CMAKE_VERSION VERSION_LESS 3.3)
    # only ALIAS and INTERFACE target names allow ":" in it, but
    # INTERFACE library is not allowed until cmake 3.1
    add_custom_target(Boost.boost DEPENDS Boost)
  else()
    add_library(Boost.boost INTERFACE IMPORTED)
    set_target_properties(Boost.boost PROPERTIES
      INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}")
    add_dependencies(Boost.boost Boost)
  endif()
  find_package_handle_standard_args(Boost DEFAULT_MSG
    Boost_INCLUDE_DIRS Boost_LIBRARIES)
  mark_as_advanced(Boost_LIBRARIES BOOST_INCLUDE_DIRS)
endmacro()

function(maybe_add_boost_dep target)
  get_target_property(imported ${target} IMPORTED)
  if(imported)
    return()
  endif()
  get_target_property(type ${target} TYPE)
  if(NOT type MATCHES "OBJECT_LIBRARY|STATIC_LIBRARY|SHARED_LIBRARY|EXECUTABLE")
    return()
  endif()
  get_target_property(sources ${target} SOURCES)
  foreach(src ${sources})
    get_filename_component(ext ${src} EXT)
    # assuming all cxx source files include boost header(s)
    if(ext MATCHES ".cc|.cpp|.cxx")
      add_dependencies(${target} Boost.boost)
      return()
    endif()
  endforeach()
endfunction()

# override add_library() to add Boost headers dependency
function(add_library target)
  _add_library(${target} ${ARGN})
  maybe_add_boost_dep(${target})
endfunction()

function(add_executable target)
  _add_executable(${target} ${ARGN})
  maybe_add_boost_dep(${target})
endfunction()

ceph/cmake/modules/Findatomic_ops.cmake (deleted, 28 lines)
@@ -1,28 +0,0 @@
# - Find atomic_ops
# Find the native ATOMIC_OPS headers and libraries.
#
# ATOMIC_OPS_INCLUDE_DIRS - where to find atomic_ops.h, etc.
# ATOMIC_OPS_LIBRARIES    - List of libraries when using atomic_ops.
# ATOMIC_OPS_FOUND        - True if atomic_ops found.

# Look for the header file.
FIND_PATH(ATOMIC_OPS_INCLUDE_DIR NAMES atomic_ops.h)

# Look for the library.
FIND_LIBRARY(ATOMIC_OPS_LIBRARY NAMES atomic_ops)

# handle the QUIETLY and REQUIRED arguments and set ATOMIC_OPS_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(atomic_ops DEFAULT_MSG ATOMIC_OPS_LIBRARY ATOMIC_OPS_INCLUDE_DIR)

# Copy the results to the output variables.
IF(ATOMIC_OPS_FOUND)
  SET(ATOMIC_OPS_LIBRARIES ${ATOMIC_OPS_LIBRARY})
  SET(ATOMIC_OPS_INCLUDE_DIRS ${ATOMIC_OPS_INCLUDE_DIR})
ELSE(ATOMIC_OPS_FOUND)
  SET(ATOMIC_OPS_LIBRARIES)
  SET(ATOMIC_OPS_INCLUDE_DIRS)
ENDIF(ATOMIC_OPS_FOUND)

MARK_AS_ADVANCED(ATOMIC_OPS_INCLUDE_DIR ATOMIC_OPS_LIBRARY)

ceph/cmake/modules/Findpmem.cmake (new file, 15 lines)
@@ -0,0 +1,15 @@
# Try to find libpmem
#
# Once done, this will define
#
# PMEM_FOUND
# PMEM_INCLUDE_DIR
# PMEM_LIBRARY

find_path(PMEM_INCLUDE_DIR NAMES libpmem.h)
find_library(PMEM_LIBRARY NAMES pmem)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(pmem DEFAULT_MSG PMEM_LIBRARY PMEM_INCLUDE_DIR)

mark_as_advanced(PMEM_INCLUDE_DIR PMEM_LIBRARY)
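
With the find module in place, PMEM support becomes a configure-time toggle; a hedged sketch of the corresponding invocation (libpmem must be installed for the REQUIRED find to succeed):

    mkdir build && cd build
    cmake -DWITH_PMEM=ON ..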

@@ -75,34 +75,38 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm|ARM")

elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i386|i686|amd64|x86_64|AMD64")
  set(HAVE_INTEL 1)
-  CHECK_C_COMPILER_FLAG(-msse HAVE_INTEL_SSE)
-  if(HAVE_INTEL_SSE)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse")
-  endif()
-  CHECK_C_COMPILER_FLAG(-msse2 HAVE_INTEL_SSE2)
-  if(HAVE_INTEL_SSE2)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse2")
-  endif()
-  CHECK_C_COMPILER_FLAG(-msse3 HAVE_INTEL_SSE3)
-  if(HAVE_INTEL_SSE3)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse3")
-  endif()
-  CHECK_C_COMPILER_FLAG(-mssse3 HAVE_INTEL_SSSE3)
-  if(HAVE_INTEL_SSSE3)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mssse3")
-  endif()
-  CHECK_C_COMPILER_FLAG(-mpclmul HAVE_INTEL_PCLMUL)
-  if(HAVE_INTEL_PCLMUL)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mpclmul")
-  endif()
-  CHECK_C_COMPILER_FLAG(-msse4.1 HAVE_INTEL_SSE4_1)
-  if(HAVE_INTEL_SSE4_1)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.1")
-  endif()
-  CHECK_C_COMPILER_FLAG(-msse4.2 HAVE_INTEL_SSE4_2)
-  if(HAVE_INTEL_SSE4_2)
-    set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.2")
-  endif()
+  if(CMAKE_SYSTEM_PROCESSOR MATCHES "i686|amd64|x86_64|AMD64")
+    CHECK_C_COMPILER_FLAG(-msse HAVE_INTEL_SSE)
+    if(HAVE_INTEL_SSE)
+      set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse")
+    endif()
+    if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64")
+      CHECK_C_COMPILER_FLAG(-msse2 HAVE_INTEL_SSE2)
+      if(HAVE_INTEL_SSE2)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse2")
+      endif()
+      CHECK_C_COMPILER_FLAG(-msse3 HAVE_INTEL_SSE3)
+      if(HAVE_INTEL_SSE3)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse3")
+      endif()
+      CHECK_C_COMPILER_FLAG(-mssse3 HAVE_INTEL_SSSE3)
+      if(HAVE_INTEL_SSSE3)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mssse3")
+      endif()
+      CHECK_C_COMPILER_FLAG(-mpclmul HAVE_INTEL_PCLMUL)
+      if(HAVE_INTEL_PCLMUL)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mpclmul")
+      endif()
+      CHECK_C_COMPILER_FLAG(-msse4.1 HAVE_INTEL_SSE4_1)
+      if(HAVE_INTEL_SSE4_1)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.1")
+      endif()
+      CHECK_C_COMPILER_FLAG(-msse4.2 HAVE_INTEL_SSE4_2)
+      if(HAVE_INTEL_SSE4_2)
+        set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.2")
+      endif()
+    endif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64")
+  endif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686|amd64|x86_64|AMD64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(powerpc|ppc)64le")
  set(HAVE_PPC64LE 1)
  message(STATUS " we are ppc64le")

@@ -1,4 +1,3 @@
etc/bash_completion.d/ceph
etc/init.d/ceph
usr/sbin/ceph-create-keys
usr/bin/ceph-detect-init
@@ -11,7 +10,6 @@ usr/lib/ceph/ceph_common.sh
usr/lib/ceph/erasure-code/*
usr/lib/rados-classes/*
usr/share/doc/ceph/sample.ceph.conf
usr/share/doc/ceph/sample.fetch_config
usr/share/man/man8/ceph-debugpack.8
usr/share/man/man8/ceph-deploy.8
usr/share/man/man8/ceph-run.8

ceph/debian/ceph-base.maintscript (new file, 2 lines)
@@ -0,0 +1,2 @@
rm_conffile /etc/logrotate.d/ceph.logrotate -- "$@"
rm_conffile /etc/logrotate.d/ceph -- "$@"

@@ -1,5 +1,6 @@
#! /usr/bin/dh-exec --with=install

etc/bash_completion.d/ceph
etc/bash_completion.d/rados
etc/bash_completion.d/radosgw-admin
etc/bash_completion.d/rbd

@@ -1,7 +1,6 @@
lib/udev/rules.d/95-ceph-osd.rules
lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
usr/sbin/ceph-disk
-usr/sbin/ceph-disk-udev
usr/bin/ceph-clsinfo
usr/bin/ceph-objectstore-tool
usr/bin/ceph-bluestore-tool

@@ -26,7 +26,5 @@ usr/bin/ceph_xattr_bench
usr/bin/ceph-monstore-tool
usr/bin/ceph-osdomap-tool
usr/bin/ceph-kvstore-tool
-usr/bin/dmclock-tests
-usr/bin/dmclock-data-struct-tests
usr/share/java/libcephfs-test.jar
usr/lib/ceph/ceph-monstore-update-crush.sh

ceph/debian/changelog
@@ -1,3 +1,9 @@
+ceph (12.1.0-1) stable; urgency=medium
+
+  * New upstream release
+
+ -- Ceph Release Team <ceph-maintainers@ceph.com>  Thu, 22 Jun 2017 15:43:47 +0000
+
ceph (12.0.3-1) stable; urgency=medium

  * New upstream release

ceph/debian/control
@@ -9,6 +9,7 @@ Uploaders: Ken Dreyer <kdreyer@redhat.com>,
           Alfredo Deza <adeza@redhat.com>
Build-Depends: bc,
               btrfs-tools,
+               gperf,
               cmake,
               cpio,
               cryptsetup-bin | cryptsetup,
@@ -25,13 +26,11 @@ Build-Depends: bc,
               jq,
               junit4,
               libaio-dev,
-               libatomic-ops-dev,
               libbabeltrace-ctf-dev,
               libbabeltrace-dev,
               libblkid-dev (>= 2.17),
               libcurl4-gnutls-dev,
               libexpat1-dev,
-               libfcgi-dev,
               libfuse-dev,
               libgoogle-perftools-dev [i386 amd64 arm64],
               libibverbs-dev,
@@ -50,10 +49,13 @@ Build-Depends: bc,
               pkg-config,
               python (>= 2.7),
               python-all-dev,
+               python-cherrypy3,
               python-nose,
+               python-pecan,
               python-prettytable,
               python-setuptools,
               python-sphinx,
+               python-werkzeug,
               python3-all-dev,
               python3-setuptools,
               virtualenv | python-virtualenv,
@@ -159,12 +161,16 @@ Description: debugging symbols for ceph-mds
Package: ceph-mgr
Architecture: linux-any
Depends: ceph-base (= ${binary:Version}),
         python-openssl,
         python-pecan,
         python-werkzeug,
         ${misc:Depends},
         ${python:Depends},
         python-cherrypy3,
         ${shlibs:Depends}
Replaces: ceph (<< 0.93-417)
Breaks: ceph (<< 0.93-417)
-Description: metadata server for the ceph distributed file system
+Description: manager for the ceph distributed storage system
 Ceph is a massively scalable, open-source, distributed
 storage system that runs on commodity hardware and delivers object,
 block and file system storage.
@@ -646,7 +652,7 @@ Description: debugging symbols for radosgw

Package: ceph-test
Architecture: linux-any
-Depends: ceph-common, curl, xmlstarlet, ${misc:Depends}, ${shlibs:Depends}
+Depends: ceph-common, curl, xmlstarlet, jq, ${misc:Depends}, ${shlibs:Depends}
Description: Ceph test and benchmarking tools
 This package contains tools for testing and benchmarking Ceph.

@@ -33,10 +33,6 @@ Files: src/common/bloom_filter.hpp
Copyright: Copyright (C) 2000 Arash Partow
License: Boost Software License, Version 1.0

-Files: m4/acx_pthread.m4
-Copyright: Steven G. Johnson <stevenj@alum.mit.edu>
-License: GPLWithACException
-
Files: src/common/crc32c_intel*:
Copyright:
 Copyright 2012-2013 Intel Corporation All Rights Reserved.

@@ -1,4 +1,5 @@
usr/bin/radosgw
usr/bin/radosgw-token
+usr/bin/radosgw-es
usr/bin/radosgw-object-expirer
usr/share/man/man8/radosgw.8

ceph/debian/rules
@@ -22,6 +22,13 @@ ifeq ($(DEB_HOST_ARCH), armel)
  extraopts += -DWITH_ATOMIC_OPS=OFF
endif

+ifneq (,$(filter $(DEB_HOST_ARCH), arm armel armhf arm64 i386 amd64 mips mipsel powerpc ppc64))
+# beast depends on libboost_context which only supports the archs above
+  extraopts += -DWITH_RADOSGW_BEAST_FRONTEND=ON
+else
+  extraopts += -DWITH_RADOSGW_BEAST_FRONTEND=OFF
+endif
+
%:
	dh $@ --buildsystem=cmake --with javahelper,python2,python3,systemd --parallel

@@ -52,8 +59,8 @@ override_dh_installdocs:
	dh_installdocs -a --all ChangeLog

override_dh_installlogrotate:
-	cp src/logrotate.conf debian/ceph-base.ceph.logrotate
-	dh_installlogrotate -pceph-base --name=ceph
+	cp src/logrotate.conf debian/ceph-common.logrotate
+	dh_installlogrotate -pceph-common

override_dh_installinit:
	# dh_installinit is only set up to handle one upstart script

ceph/do_cmake.sh
@@ -4,9 +4,16 @@ if test -e build; then
    echo 'build dir already exists; rm -rf build and re-run'
    exit 1
fi

+ARGS=""
+if which ccache ; then
+    echo "enabling ccache"
+    ARGS+="-DWITH_CCACHE=ON"
+fi
+
mkdir build
cd build
-cmake -DBOOST_J=$(nproc) "$@" ..
+cmake -DBOOST_J=$(nproc) $ARGS "$@" ..

# minimal config to find plugins
cat <<EOF > ceph.conf
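
In other words, ccache is now enabled automatically when it is found on the PATH, and extra arguments are still passed through to cmake. A hedged usage sketch (assuming this hunk is ceph's do_cmake.sh, as its content suggests; the pass-through flag is one of the options defined in CMakeLists.txt):

    ./do_cmake.sh -DWITH_LZ4=ON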

@@ -33,14 +33,6 @@ See `librgw-py`_.

.. _librgw-py: ../radosgw/api

-Calamari APIs
-=============
-
-See `Calamari API`_.
-
-.. _Calamari API: http://ceph.com/calamari/docs/calamari_rest/index.html
-
-
Ceph Object Store APIs
======================


ceph/doc/architecture.rst
@@ -207,10 +207,10 @@ of failure or bottleneck when using ``cephx``. The monitor returns an
authentication data structure similar to a Kerberos ticket that contains a
session key for use in obtaining Ceph services. This session key is itself
encrypted with the user's permanent secret key, so that only the user can
-request services from the Ceph monitor(s). The client then uses the session key
+request services from the Ceph Monitor(s). The client then uses the session key
to request its desired services from the monitor, and the monitor provides the
client with a ticket that will authenticate the client to the OSDs that actually
-handle data. Ceph monitors and OSDs share a secret, so the client can use the
+handle data. Ceph Monitors and OSDs share a secret, so the client can use the
ticket provided by the monitor with any OSD or metadata server in the cluster.
Like Kerberos, ``cephx`` tickets expire, so an attacker cannot use an expired
ticket or session key obtained surreptitiously. This form of authentication will
@@ -338,7 +338,7 @@ dispatch--which is a **huge** bottleneck at the petabyte-to-exabyte scale.
Ceph eliminates the bottleneck: Ceph's OSD Daemons AND Ceph Clients are cluster
aware. Like Ceph clients, each Ceph OSD Daemon knows about other Ceph OSD
Daemons in the cluster. This enables Ceph OSD Daemons to interact directly with
-other Ceph OSD Daemons and Ceph monitors. Additionally, it enables Ceph Clients
+other Ceph OSD Daemons and Ceph Monitors. Additionally, it enables Ceph Clients
to interact directly with Ceph OSD Daemons.

The ability of Ceph Clients, Ceph Monitors and Ceph OSD Daemons to interact with
@@ -360,12 +360,14 @@ ability to leverage this computing power leads to several major benefits:
   Ceph Client requests. If a Ceph OSD Daemon is ``down`` and ``in`` the Ceph
   Storage Cluster, this status may indicate the failure of the Ceph OSD
   Daemon. If a Ceph OSD Daemon is not running (e.g., it crashes), the Ceph OSD
-   Daemon cannot notify the Ceph Monitor that it is ``down``. The Ceph Monitor
-   can ping a Ceph OSD Daemon periodically to ensure that it is running.
-   However, Ceph also empowers Ceph OSD Daemons to determine if a neighboring
-   OSD is ``down``, to update the cluster map and to report it to the Ceph
-   monitor(s). This means that Ceph monitors can remain light weight processes.
-   See `Monitoring OSDs`_ and `Heartbeats`_ for additional details.
+   Daemon cannot notify the Ceph Monitor that it is ``down``. The OSDs
+   periodically send messages to the Ceph Monitor (``MPGStats`` pre-luminous,
+   and a new ``MOSDBeacon`` in luminous). If the Ceph Monitor doesn't see that
+   message after a configurable period of time then it marks the OSD down.
+   This mechanism is a failsafe, however. Normally, Ceph OSD Daemons will
+   determine if a neighboring OSD is down and report it to the Ceph Monitor(s).
+   This assures that Ceph Monitors are lightweight processes. See `Monitoring
+   OSDs`_ and `Heartbeats`_ for additional details.

#. **Data Scrubbing:** As part of maintaining data consistency and cleanliness,
   Ceph OSD Daemons can scrub objects within placement groups. That is, Ceph
@ -1579,7 +1581,7 @@ instance for high availability.
|
||||
.. _Monitoring OSDs and PGs: ../rados/operations/monitoring-osd-pg
|
||||
.. _Heartbeats: ../rados/configuration/mon-osd-interaction
|
||||
.. _Monitoring OSDs: ../rados/operations/monitoring-osd-pg/#monitoring-osds
|
||||
.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: http://ceph.com/papers/weil-crush-sc06.pdf
|
||||
.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: https://ceph.com/wp-content/uploads/2016/08/weil-crush-sc06.pdf
|
||||
.. _Data Scrubbing: ../rados/configuration/osd-config-ref#scrubbing
|
||||
.. _Report Peering Failure: ../rados/configuration/mon-osd-interaction#osds-report-peering-failure
|
||||
.. _Troubleshooting Peering Failure: ../rados/troubleshooting/troubleshooting-pg#placement-group-down-peering-failure
|
||||
|
@ -42,6 +42,38 @@ creation of multiple filesystems use ``ceph fs flag set enable_multiple true``.
|
||||
fs rm_data_pool <filesystem name> <pool name/id>
|
||||
|
||||
|
||||
Settings
|
||||
--------
|
||||
|
||||
::
|
||||
|
||||
fs set <fs name> max_file_size <size in bytes>
|
||||
|
||||
CephFS has a configurable maximum file size, and it's 1TB by default.
|
||||
You may wish to set this limit higher if you expect to store large files
|
||||
in CephFS. It is a 64-bit field.
|
||||
|
||||
Setting ``max_file_size`` to 0 does not disable the limit. It would
|
||||
simply limit clients to only creating empty files.
|
||||
|
||||
|
||||
Maximum file sizes and performance
|
||||
----------------------------------
|
||||
|
||||
CephFS enforces the maximum file size limit at the point of appending to
|
||||
files or setting their size. It does not affect how anything is stored.
|
||||
|
||||
When users create a file of an enormous size (without necessarily
writing any data to it), some operations (such as deletes) force the MDS
to perform a large number of operations, checking whether any of the RADOS
objects that could exist within that range (according to the file size)
actually exist.
|
||||
|
||||
The ``max_file_size`` setting prevents users from creating files that
|
||||
appear to be, e.g., exabytes in size, causing load on the MDS as it tries
|
||||
to enumerate the objects during operations like stats or deletes.
|
||||
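To make that load concrete, a back-of-the-envelope sketch in Python (the
4 MiB object size is the usual layout default; the file size is a made-up
example)::

    # How many RADOS objects the MDS may have to probe for a huge sparse
    # file, assuming 4 MiB objects (hypothetical numbers).
    object_size = 4 * 2**20             # bytes per RADOS object
    claimed_size = 2**60                # a 1 EiB sparse file
    print(claimed_size // object_size)  # 274877906944 existence checks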
|
||||
|
||||
Daemons
|
||||
-------
|
||||
|
||||
@ -126,6 +158,11 @@ filesystem.
|
||||
Legacy
|
||||
------
|
||||
|
||||
The ``ceph mds set`` command is the deprecated version of ``ceph fs set``,
|
||||
from before there was more than one filesystem per cluster. It operates
|
||||
on whichever filesystem is marked as the default (see ``ceph fs
set-default``).
|
||||
|
||||
::
|
||||
|
||||
mds stat
|
||||
|
@ -1,4 +1,5 @@
|
||||
|
||||
===============================
|
||||
Ceph filesystem client eviction
|
||||
===============================
|
||||
|
||||
@ -6,114 +7,139 @@ When a filesystem client is unresponsive or otherwise misbehaving, it
|
||||
may be necessary to forcibly terminate its access to the filesystem. This
|
||||
process is called *eviction*.
|
||||
|
||||
This process is somewhat thorough in order to protect against data inconsistency
|
||||
resulting from misbehaving clients.
|
||||
Evicting a CephFS client prevents it from communicating further with MDS
|
||||
daemons and OSD daemons. If a client was doing buffered IO to the filesystem,
|
||||
any un-flushed data will be lost.
|
||||
|
||||
OSD blacklisting
|
||||
----------------
|
||||
Clients may either be evicted automatically (if they fail to communicate
|
||||
promptly with the MDS), or manually (by the system administrator).
|
||||
|
||||
First, prevent the client from performing any more data operations by *blacklisting*
|
||||
it at the RADOS level. You may be familiar with this concept as *fencing* in other
|
||||
storage systems.
|
||||
The client eviction process applies to clients of all kinds, this includes
|
||||
FUSE mounts, kernel mounts, nfs-ganesha gateways, and any process using
|
||||
libcephfs.
|
||||
|
||||
Identify the client to evict from the MDS session list:
|
||||
Automatic client eviction
|
||||
=========================
|
||||
|
||||
There are two situations in which a client may be evicted automatically:
|
||||
|
||||
On an active MDS daemon, if a client has not communicated with the MDS for
|
||||
over ``mds_session_autoclose`` seconds (300 seconds by default), then it
|
||||
will be evicted automatically.
|
||||
|
||||
During MDS startup (including on failover), the MDS passes through a
|
||||
state called ``reconnect``. During this state, it waits for all the
|
||||
clients to connect to the new MDS daemon. If any clients fail to do
|
||||
so within the time window (``mds_reconnect_timeout``, 45 seconds by default)
|
||||
then they will be evicted.
|
||||
|
||||
A warning message is sent to the cluster log if either of these situations
|
||||
arises.
|
||||
|
||||
Manual client eviction
|
||||
======================
|
||||
|
||||
Sometimes, the administrator may want to evict a client manually. This
|
||||
could happen if a client has died and the administrator does not
|
||||
want to wait for its session to time out, or it could happen if
|
||||
a client is misbehaving and the administrator does not have access to
|
||||
the client node to unmount it.
|
||||
|
||||
It is useful to inspect the list of clients first:
|
||||
|
||||
::
|
||||
|
||||
# ceph daemon mds.a session ls
|
||||
ceph tell mds.0 client ls
|
||||
|
||||
[
|
||||
{ "id": 4117,
|
||||
"num_leases": 0,
|
||||
"num_caps": 1,
|
||||
"state": "open",
|
||||
"replay_requests": 0,
|
||||
"reconnecting": false,
|
||||
"inst": "client.4117 172.16.79.251:0\/3271",
|
||||
"client_metadata": { "entity_id": "admin",
|
||||
"hostname": "fedoravm.localdomain",
|
||||
"mount_point": "\/home\/user\/mnt"}}]
|
||||
{
|
||||
"id": 4305,
|
||||
"num_leases": 0,
|
||||
"num_caps": 3,
|
||||
"state": "open",
|
||||
"replay_requests": 0,
|
||||
"completed_requests": 0,
|
||||
"reconnecting": false,
|
||||
"inst": "client.4305 172.21.9.34:0/422650892",
|
||||
"client_metadata": {
|
||||
"ceph_sha1": "ae81e49d369875ac8b569ff3e3c456a31b8f3af5",
|
||||
"ceph_version": "ceph version 12.0.0-1934-gae81e49 (ae81e49d369875ac8b569ff3e3c456a31b8f3af5)",
|
||||
"entity_id": "0",
|
||||
"hostname": "senta04",
|
||||
"mount_point": "/tmp/tmpcMpF1b/mnt.0",
|
||||
"pid": "29377",
|
||||
"root": "/"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
In this case the 'fedoravm' client has address ``172.16.79.251:0/3271``, so we blacklist
|
||||
it as follows:
|
||||
|
||||
|
||||
Once you have identified the client you want to evict, you can
evict it using its unique ID, or various other attributes that identify it:
|
||||
|
||||
::
|
||||
|
||||
# ceph osd blacklist add 172.16.79.251:0/3271
|
||||
blacklisting 172.16.79.251:0/3271 until 2014-12-09 13:09:56.569368 (3600 sec)
|
||||
# These all work
|
||||
ceph tell mds.0 client evict id=4305
|
||||
ceph tell mds.0 client evict client_metadata.entity_id=0
|
||||
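For scripted eviction, a minimal sketch can work as shown below; it assumes
the ``ceph`` CLI is on ``PATH`` and that sessions have the JSON shape shown
above (the hostname value is hypothetical)::

    # Sketch: evict every session whose client reported a given hostname.
    import json
    import subprocess

    target = "senta04"  # hypothetical hostname, as in the listing above
    sessions = json.loads(subprocess.check_output(
        ["ceph", "tell", "mds.0", "client", "ls"]))
    for s in sessions:
        if s.get("client_metadata", {}).get("hostname") == target:
            subprocess.check_call(
                ["ceph", "tell", "mds.0", "client", "evict",
                 "id=%d" % s["id"]])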
|
||||
OSD epoch barrier
|
||||
-----------------
|
||||
|
||||
While the evicted client is now marked as blacklisted in the central (mon) copy of the OSD
|
||||
map, it is now necessary to ensure that this OSD map update has propagated to all daemons
|
||||
involved in subsequent filesystem I/O. To do this, use the ``osdmap barrier`` MDS admin
|
||||
socket command.
|
||||
Advanced: Un-blacklisting a client
|
||||
==================================
|
||||
|
||||
First read the latest OSD epoch:
|
||||
Ordinarily, a blacklisted client may not reconnect to the servers: it
|
||||
must be unmounted and then mounted anew.
|
||||
|
||||
However, in some situations it may be useful to permit a client that
|
||||
was evicted to attempt to reconnect.
|
||||
|
||||
Because CephFS uses the RADOS OSD blacklist to control client eviction,
|
||||
CephFS clients can be permitted to reconnect by removing them from
|
||||
the blacklist:
|
||||
|
||||
::
|
||||
|
||||
# ceph osd dump
|
||||
epoch 12
|
||||
fsid fd61ca96-53ff-4311-826c-f36b176d69ea
|
||||
created 2014-12-09 12:03:38.595844
|
||||
modified 2014-12-09 12:09:56.619957
|
||||
...
|
||||
ceph osd blacklist ls
|
||||
# ... identify the address of the client ...
|
||||
ceph osd blacklist rm <address>
|
||||
|
||||
In this case it is 12. Now request the MDS to barrier on this epoch:
|
||||
Doing this may put data integrity at risk if other clients have accessed
|
||||
files that the blacklisted client was doing buffered IO to. It is also not
|
||||
guaranteed to result in a fully functional client -- the best way to get
|
||||
a fully healthy client back after an eviction is to unmount the client
|
||||
and do a fresh mount.
|
||||
|
||||
::
|
||||
If you are trying to reconnect clients in this way, you may also
|
||||
find it useful to set ``client_reconnect_stale`` to true in the
|
||||
FUSE client, to prompt the client to try to reconnect.
|
||||
|
||||
# ceph daemon mds.a osdmap barrier 12
|
||||
Advanced: Configuring blacklisting
|
||||
==================================
|
||||
|
||||
MDS session eviction
|
||||
--------------------
|
||||
If you are experiencing frequent client evictions, due to slow
|
||||
client hosts or an unreliable network, and you cannot fix the underlying
|
||||
issue, then you may want to ask the MDS to be less strict.
|
||||
|
||||
Finally, it is safe to evict the client's MDS session, such that any capabilities it held
|
||||
may be issued to other clients. The ID here is the ``id`` attribute from the ``session ls``
|
||||
output:
|
||||
It is possible to respond to slow clients by simply dropping their
MDS sessions, while permitting them to re-open sessions and
to continue talking to OSDs. To enable this mode, set
|
||||
``mds_session_blacklist_on_timeout`` to false on your MDS nodes.
|
||||
|
||||
::
|
||||
For the equivalent behaviour on manual evictions, set
|
||||
``mds_session_blacklist_on_evict`` to false.
|
||||
|
||||
# ceph daemon mds.a session evict 4117
|
||||
Note that if blacklisting is disabled, then evicting a client will
|
||||
only have an effect on the MDS you send the command to. On a system
|
||||
with multiple active MDS daemons, you would need to send an
|
||||
eviction command to each active daemon. When blacklisting is enabled
|
||||
(the default), sending an eviction command to just a single
|
||||
MDS is sufficient, because the blacklist propagates it to the others.
|
||||
|
||||
That's it! The client has now been evicted, and any resources it had locked will
|
||||
now be available for other clients.
|
||||
Advanced options
|
||||
================
|
||||
|
||||
Background: OSD epoch barrier
|
||||
-----------------------------
|
||||
``mds_blacklist_interval`` - this setting controls how many seconds
entries remain in the blacklist.
|
||||
|
||||
The purpose of the barrier is to ensure that when we hand out any
|
||||
capabilities which might allow touching the same RADOS objects, the
|
||||
clients we hand out the capabilities to must have a sufficiently recent
|
||||
OSD map to not race with cancelled operations (from ENOSPC) or
|
||||
blacklisted clients (from evictions).
|
||||
|
||||
More specifically, the cases where we set an epoch barrier are:
|
||||
|
||||
* Client eviction (where the client is blacklisted and other clients
|
||||
must wait for a post-blacklist epoch to touch the same objects)
|
||||
* OSD map full flag handling in the client (where the client may
|
||||
cancel some OSD ops from a pre-full epoch, so other clients must
|
||||
wait until the full epoch or later before touching the same objects).
|
||||
* MDS startup, because we don't persist the barrier epoch, so must
|
||||
assume that latest OSD map is always required after a restart.
|
||||
|
||||
Note that this is a global value for simplicity: we could maintain this on
|
||||
a per-inode basis. We don't, because:
|
||||
|
||||
* It would be more complicated
|
||||
* It would use an extra 4 bytes of memory for every inode
|
||||
* It would not be much more efficient: almost everyone has the latest
  OSD map anyway, so in most cases everyone would breeze through this barrier
  rather than waiting.
|
||||
* We only do this barrier in very rare cases, so any benefit from per-inode
|
||||
granularity would only very rarely be seen.
|
||||
|
||||
The epoch barrier is transmitted along with all capability messages, and
|
||||
instructs the receiver of the message to avoid sending any more RADOS
|
||||
operations to OSDs until it has seen this OSD epoch. This mainly applies
|
||||
to clients (doing their data writes directly to files), but also applies
|
||||
to the MDS because things like file size probing and file deletion are
|
||||
done directly from the MDS.
|
||||
|
||||
|
@ -34,6 +34,14 @@ stripe_count
|
||||
object_size
|
||||
Integer in bytes. File data is chunked into RADOS objects of this size.
|
||||
|
||||
.. tip::
|
||||
|
||||
RADOS enforces a configurable limit on object sizes: if you increase CephFS
|
||||
object sizes beyond that limit then writes may not succeed. The OSD
|
||||
setting is ``rados_max_object_size``, which is 128MB by default.
|
||||
Very large RADOS objects may prevent smooth operation of the cluster,
|
||||
so increasing the object size limit past the default is not recommended.
|
||||
|
||||
Reading layouts with ``getfattr``
|
||||
---------------------------------
|
||||
|
||||
|
@ -122,6 +122,7 @@ extended attribute of directories. The name of this extended attribute is
|
||||
``ceph.dir.pin``. Users can set this attribute using standard commands:
|
||||
|
||||
::
|
||||
|
||||
setfattr -n ceph.dir.pin -v 2 path/to/dir
|
||||
|
||||
The value of the extended attribute is the rank to assign the directory subtree
|
||||
@ -133,9 +134,11 @@ children. However, the parent's pin can be overridden by setting the child
|
||||
directory's export pin. For example:
|
||||
|
||||
::
|
||||
|
||||
mkdir -p a/b
|
||||
# "a" and "a/b" both start without an export pin set
|
||||
setfattr -n ceph.dir.pin -v 1 a/
|
||||
# a and b are now pinned to rank 1
|
||||
setfattr -n ceph.dir.pin -v 0 a/b
|
||||
# a/b is now pinned to rank 0 and a/ and the rest of its children are still pinned to rank 1
|
||||
|
||||
|
@ -16,7 +16,7 @@ in realtime. The LTTng traces can then be visualized with Twitter's
|
||||
Zipkin_.
|
||||
|
||||
.. _Dapper: http://static.googleusercontent.com/media/research.google.com/el//pubs/archive/36356.pdf
|
||||
.. _Zipkin: http://twitter.github.io/zipkin/
|
||||
.. _Zipkin: http://zipkin.io/
|
||||
|
||||
|
||||
Installing Blkin
|
||||
|
@ -45,10 +45,6 @@ Options
|
||||
|
||||
Add *config* to all sections in the ceph configuration.
|
||||
|
||||
.. option:: -r
|
||||
|
||||
Start radosgw on port starting from 8000.
|
||||
|
||||
.. option:: --nodaemon
|
||||
|
||||
Use ceph-run as wrapper for mon/osd/mds.
|
||||
@ -73,10 +69,6 @@ Options
|
||||
|
||||
Launch the osd/mds/mon/all the ceph binaries using valgrind with the specified tool and arguments.
|
||||
|
||||
.. option:: --{mon,osd,mds}_num
|
||||
|
||||
Set the count of mon/osd/mds daemons
|
||||
|
||||
.. option:: --bluestore
|
||||
|
||||
Use bluestore as the objectstore backend for osds
|
||||
|
@ -55,7 +55,7 @@ Release Cycle
|
||||
|
||||
|
||||
Four times a year, the development roadmap is discussed online during
|
||||
the `Ceph Developer Summit <http://wiki.ceph.com/Planning/CDS/>`_. A
|
||||
the `Ceph Developer Summit <http://tracker.ceph.com/projects/ceph/wiki/Planning#Ceph-Developer-Summit>`_. A
|
||||
new stable release (hammer, infernalis, jewel ...) is published at the same
|
||||
frequency. Every other release (firefly, hammer, jewel...) is a `Long Term
|
||||
Stable (LTS) <../../releases>`_. See `Understanding the release cycle
|
||||
@ -126,7 +126,7 @@ Running and interpreting teuthology integration tests
|
||||
The :doc:`/dev/sepia` runs `teuthology
|
||||
<https://github.com/ceph/teuthology/>`_ integration tests `on a regular basis <http://tracker.ceph.com/projects/ceph-releases/wiki/HOWTO_monitor_the_automated_tests_AKA_nightlies#Automated-tests-AKA-nightlies>`_ and the
|
||||
results are posted on `pulpito <http://pulpito.ceph.com/>`_ and the
|
||||
`ceph-qa mailing list <http://ceph.com/resources/mailing-list-irc/>`_.
|
||||
`ceph-qa mailing list <https://ceph.com/irc/>`_.
|
||||
|
||||
* The job failures are `analyzed by quality engineers and developers
|
||||
<http://tracker.ceph.com/projects/ceph-releases/wiki/HOWTO_monitor_the_automated_tests_AKA_nightlies#List-of-suites-and-watchers>`_
|
||||
|
@ -30,7 +30,7 @@ API`_ provides a complete example. It is pulled into Sphinx by
|
||||
`librados.rst`_, which is rendered at :doc:`/rados/api/librados`.
|
||||
|
||||
.. _`librados C API`: https://github.com/ceph/ceph/blob/master/src/include/rados/librados.h
|
||||
.. _`librados.rst`: https://raw.github.com/ceph/ceph/master/doc/api/librados.rst
|
||||
.. _`librados.rst`: https://github.com/ceph/ceph/raw/master/doc/rados/api/librados.rst
|
||||
|
||||
Drawing diagrams
|
||||
================
|
||||
|
@ -135,7 +135,7 @@ in the body of the message.
|
||||
|
||||
There are also `other Ceph-related mailing lists`_.
|
||||
|
||||
.. _`other Ceph-related mailing lists`: https://ceph.com/resources/mailing-list-irc/
|
||||
.. _`other Ceph-related mailing lists`: https://ceph.com/irc/
|
||||
|
||||
IRC
|
||||
---
|
||||
@ -145,7 +145,7 @@ time using `Internet Relay Chat`_.
|
||||
|
||||
.. _`Internet Relay Chat`: http://www.irchelp.org/
|
||||
|
||||
See https://ceph.com/resources/mailing-list-irc/ for how to set up your IRC
|
||||
See https://ceph.com/irc/ for how to set up your IRC
|
||||
client and a list of channels.
|
||||
|
||||
Submitting patches
|
||||
@ -750,7 +750,7 @@ The results of the nightlies are published at http://pulpito.ceph.com/ and
|
||||
http://pulpito.ovh.sepia.ceph.com:8081/. The developer nick shows in the
|
||||
test results URL and in the first column of the Pulpito dashboard. The
|
||||
results are also reported on the `ceph-qa mailing list
|
||||
<http://ceph.com/resources/mailing-list-irc/>`_ for analysis.
|
||||
<https://ceph.com/irc/>`_ for analysis.
|
||||
|
||||
Suites inventory
|
||||
----------------
|
||||
@ -1132,11 +1132,14 @@ Reducing the number of tests
|
||||
----------------------------
|
||||
|
||||
The ``rados`` suite generates thousands of tests out of a few hundred
|
||||
files. For instance, all tests in the `rados/thrash suite
|
||||
<https://github.com/ceph/ceph/tree/master/qa/suites/rados/thrash>`_
|
||||
run for ``xfs``, ``btrfs`` and ``ext4`` because they are combined (via
|
||||
special file ``%``) with the `fs directory
|
||||
<https://github.com/ceph/ceph/tree/master/qa/suites/rados/thrash/fs>`_
|
||||
files. This happens because teuthology constructs test matrices from
|
||||
subdirectories wherever it encounters a file named ``%``. For instance,
|
||||
all tests in the `rados/basic suite
|
||||
<https://github.com/ceph/ceph/tree/master/qa/suites/rados/basic>`_
|
||||
run with different messenger types: ``simple``, ``async`` and
|
||||
``random``, because they are combined (via the special file ``%``) with
|
||||
the `msgr directory
|
||||
<https://github.com/ceph/ceph/tree/master/qa/suites/rados/basic/msgr>`_
|
||||
|
||||
All integration tests are required to be run before a Ceph release is published.
|
||||
When merely verifying whether a contribution can be merged without
|
||||
@ -1199,9 +1202,9 @@ Getting ceph-workbench
|
||||
Since testing in the cloud is done using the `ceph-workbench
|
||||
ceph-qa-suite`_ tool, you will need to install that first. It is designed
|
||||
to be installed via Docker, so if you don't have Docker running on your
|
||||
development machine, take care of that first. The Docker project has a good
|
||||
tutorial called `Get Started with Docker Engine for Linux
|
||||
<https://docs.docker.com/linux/>`_ if you unsure how to proceed.
|
||||
development machine, take care of that first. If you have not installed
Docker yet, you can follow `the official tutorial
<https://docs.docker.com/engine/installation/>`_.
|
||||
|
||||
Once Docker is up and running, install ``ceph-workbench`` by following the
|
||||
`Installation instructions in the ceph-workbench documentation
|
||||
@ -1434,28 +1437,14 @@ Refer to :doc:`install/build-ceph`.
|
||||
|
||||
You can do step 2 separately while it is building.
|
||||
|
||||
Step 2 - s3-tests
|
||||
-----------------
|
||||
|
||||
The test suite is in a separate git repo, and is written in python. Perform the
|
||||
following steps for jewel::
|
||||
|
||||
git clone git://github.com/ceph/s3-tests
|
||||
cd s3-tests
|
||||
git checkout ceph-jewel
|
||||
./bootstrap
|
||||
|
||||
For kraken, checkout the ``ceph-kraken`` branch instead of ``ceph-jewel``. For
|
||||
master, use ``ceph-master``.
|
||||
|
||||
Step 3 - vstart
|
||||
Step 2 - vstart
|
||||
---------------
|
||||
|
||||
When the build completes, and still in the top-level directory of the git
|
||||
clone where you built Ceph, do the following::
|
||||
clone where you built Ceph, do the following, for cmake builds::
|
||||
|
||||
cd src/
|
||||
./vstart.sh -n -r --mds_num 0
|
||||
cd build/
|
||||
RGW=1 ../vstart.sh -n
|
||||
|
||||
This will produce a lot of output as the vstart cluster is started up. At the
|
||||
end you should see a message like::
|
||||
@ -1464,51 +1453,13 @@ end you should see a message like::
|
||||
|
||||
This means the cluster is running.
|
||||
|
||||
Step 4 - prepare S3 environment
|
||||
-------------------------------
|
||||
|
||||
The s3-tests suite expects to run in a particular environment (S3 users, keys,
|
||||
configuration file).
|
||||
|
||||
Before you try to prepare the environment, make sure you don't have any
|
||||
existing keyring or ``ceph.conf`` files in ``/etc/ceph``.
|
||||
|
||||
For jewel, Abhishek Lekshmanan wrote a script that can be used for this
|
||||
purpose. Assuming you are testing jewel, run the following commands from the
|
||||
``src/`` directory of your ceph clone (where you just started the vstart
|
||||
cluster)::
|
||||
|
||||
pushd ~
|
||||
wget https://gist.githubusercontent.com/theanalyst/2fee6bc2780f67c79cad7802040fcddc/raw/b497ddba053d9a6fb5d91b73924cbafcfc32f137/s3tests-bootstrap.sh
|
||||
popd
|
||||
sh ~/s3tests-bootstrap.sh
|
||||
|
||||
If the script is successful, it will display a blob of JSON and create a file
|
||||
called ``s3.conf`` in the current directory.
|
||||
|
||||
Step 5 - run s3-tests
|
||||
Step 3 - run s3-tests
|
||||
---------------------
|
||||
|
||||
To actually run the tests, take note of the full path to the ``s3.conf`` file
|
||||
created in the previous step and then move to the directory where you cloned
|
||||
``s3-tests`` in Step 2.
|
||||
|
||||
First, verify that the test suite is there and can be run::
|
||||
|
||||
S3TEST_CONF=/path/to/s3.conf ./virtualenv/bin/nosetests -a '!fails_on_rgw' -v --collect-only
|
||||
|
||||
This should complete quickly - it is like a "dry run" of all the tests in the
|
||||
suite.
|
||||
|
||||
Finally, run the test suite itself::
|
||||
|
||||
S3TEST_CONF=/path/to/s3.conf ./virtualenv/bin/nosetests -a '!fails_on_rgw' -v
|
||||
|
||||
Note: the following test is expected to error - this is a problem in the test
|
||||
setup (WIP), not an actual test failure::
|
||||
|
||||
ERROR: s3tests.functional.test_s3.test_bucket_acl_grant_email
|
||||
To run the s3tests suite do the following::
|
||||
|
||||
$ ../qa/workunits/rgw/run-s3tests.sh
|
||||
|
||||
.. WIP
|
||||
.. ===
|
||||
|
@ -189,7 +189,7 @@ in the registry. The `ErasureCodePluginExample <https://github.com/ceph/ceph/blo
|
||||
|
||||
The *ErasureCodePlugin* derived object must provide a factory method
|
||||
from which the concrete implementation of the *ErasureCodeInterface*
|
||||
object can be generated. The `ErasureCodePluginExample plugin <https://github.com/ceph/ceph/blob/v0.78/src/test/osd/ErasureCodePluginExample.cc>`_ reads:
|
||||
object can be generated. The `ErasureCodePluginExample plugin <https://github.com/ceph/ceph/blob/v0.78/src/test/erasure-code/ErasureCodePluginExample.cc>`_ reads:
|
||||
|
||||
::
|
||||
|
||||
|
@ -39,89 +39,121 @@ The ``perf schema`` command dumps a json description of which values are availab
|
||||
+------+-------------------------------------+
|
||||
| 2 | unsigned 64-bit integer value |
|
||||
+------+-------------------------------------+
|
||||
| 4 | average (sum + count pair) |
|
||||
| 4 | average (sum + count pair) |
|
||||
+------+-------------------------------------+
|
||||
| 8 | counter (vs gauge) |
|
||||
+------+-------------------------------------+
|
||||
|
||||
Every value will have either bit 1 or 2 set to indicate the type (float or integer). If bit 8 is set (counter), the reader may want to subtract off the previously read value to get the delta during the previous interval.
|
||||
Every value will have either bit 1 or 2 set to indicate the type
|
||||
(float or integer).
|
||||
|
||||
If bit 4 is set (average), there will be two values to read, a sum and a count. If it is a counter, the average for the previous interval would be sum delta (since the previous read) divided by the count delta. Alternatively, dividing the values outright would provide the lifetime average value. Normally these are used to measure latencies (number of requests and a sum of request latencies), and the average for the previous interval is what is interesting.
|
||||
If bit 8 is set (counter), the value is monotonically increasing and
|
||||
the reader may want to subtract off the previously read value to get
|
||||
the delta during the previous interval.
|
||||
|
||||
If bit 4 is set (average), there will be two values to read, a sum and
|
||||
a count. If it is a counter, the average for the previous interval
|
||||
would be sum delta (since the previous read) divided by the count
|
||||
delta. Alternatively, dividing the values outright would provide the
|
||||
lifetime average value. Normally these are used to measure latencies
|
||||
(number of requests and a sum of request latencies), and the average
|
||||
for the previous interval is what is interesting.
|
||||
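As a small illustration (a sketch, not part of Ceph), the per-interval
average from two consecutive reads of such a sum + count pair::

    # Sketch: mean latency over the interval between two `perf dump` reads.
    def interval_average(prev, curr):
        # prev and curr are (sum, count) pairs for the same counter
        dsum, dcount = curr[0] - prev[0], curr[1] - prev[1]
        return dsum / dcount if dcount else None

    # 30 new requests took 1.5s in total since the previous read:
    print(interval_average((12.5, 100), (14.0, 130)))  # -> 0.05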
|
||||
Instead of interpreting the bit fields, the ``metric type`` has a
|
||||
value of either ``gauge`` or ``counter``, and the ``value type``
|
||||
property will be one of ``real``, ``integer``, ``real-integer-pair``
|
||||
(for a sum + real count pair), or ``integer-integer-pair`` (for a
|
||||
sum + integer count pair).
|
||||
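For readers who do want to interpret the raw bitfield, a minimal decoding
sketch (hypothetical helper; the bit values come from the table above)::

    # Sketch: decode the ``type`` bitfield from ``perf schema`` output.
    FLOAT, UINT64, AVERAGE, COUNTER = 1, 2, 4, 8

    def describe(t):
        parts = ["counter" if t & COUNTER else "gauge",
                 "float" if t & FLOAT else "integer"]
        if t & AVERAGE:
            parts.append("sum + count pair")
        return ", ".join(parts)

    print(describe(10))  # counter, integer (2 + 8)
    print(describe(5))   # gauge, float, sum + count pair (1 + 4)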
|
||||
Here is an example of the schema output::
|
||||
|
||||
{
|
||||
"throttle-msgr_dispatch_throttler-hbserver" : {
|
||||
"get_or_fail_fail" : {
|
||||
"type" : 10
|
||||
},
|
||||
"get_sum" : {
|
||||
"type" : 10
|
||||
},
|
||||
"max" : {
|
||||
"type" : 10
|
||||
},
|
||||
"put" : {
|
||||
"type" : 10
|
||||
},
|
||||
"val" : {
|
||||
"type" : 10
|
||||
},
|
||||
"take" : {
|
||||
"type" : 10
|
||||
},
|
||||
"get_or_fail_success" : {
|
||||
"type" : 10
|
||||
},
|
||||
"wait" : {
|
||||
"type" : 5
|
||||
},
|
||||
"get" : {
|
||||
"type" : 10
|
||||
},
|
||||
"take_sum" : {
|
||||
"type" : 10
|
||||
},
|
||||
"put_sum" : {
|
||||
"type" : 10
|
||||
}
|
||||
},
|
||||
"throttle-msgr_dispatch_throttler-client" : {
|
||||
"get_or_fail_fail" : {
|
||||
"type" : 10
|
||||
},
|
||||
"get_sum" : {
|
||||
"type" : 10
|
||||
},
|
||||
"max" : {
|
||||
"type" : 10
|
||||
},
|
||||
"put" : {
|
||||
"type" : 10
|
||||
},
|
||||
"val" : {
|
||||
"type" : 10
|
||||
},
|
||||
"take" : {
|
||||
"type" : 10
|
||||
},
|
||||
"get_or_fail_success" : {
|
||||
"type" : 10
|
||||
},
|
||||
"wait" : {
|
||||
"type" : 5
|
||||
},
|
||||
"get" : {
|
||||
"type" : 10
|
||||
},
|
||||
"take_sum" : {
|
||||
"type" : 10
|
||||
},
|
||||
"put_sum" : {
|
||||
"type" : 10
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
"throttle-bluestore_throttle_bytes": {
|
||||
"val": {
|
||||
"type": 2,
|
||||
"metric_type": "gauge",
|
||||
"value_type": "integer",
|
||||
"description": "Currently available throttle",
|
||||
"nick": ""
|
||||
},
|
||||
"max": {
|
||||
"type": 2,
|
||||
"metric_type": "gauge",
|
||||
"value_type": "integer",
|
||||
"description": "Max value for throttle",
|
||||
"nick": ""
|
||||
},
|
||||
"get_started": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Number of get calls, increased before wait",
|
||||
"nick": ""
|
||||
},
|
||||
"get": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Gets",
|
||||
"nick": ""
|
||||
},
|
||||
"get_sum": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Got data",
|
||||
"nick": ""
|
||||
},
|
||||
"get_or_fail_fail": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Get blocked during get_or_fail",
|
||||
"nick": ""
|
||||
},
|
||||
"get_or_fail_success": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Successful get during get_or_fail",
|
||||
"nick": ""
|
||||
},
|
||||
"take": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Takes",
|
||||
"nick": ""
|
||||
},
|
||||
"take_sum": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Taken data",
|
||||
"nick": ""
|
||||
},
|
||||
"put": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Puts",
|
||||
"nick": ""
|
||||
},
|
||||
"put_sum": {
|
||||
"type": 10,
|
||||
"metric_type": "counter",
|
||||
"value_type": "integer",
|
||||
"description": "Put data",
|
||||
"nick": ""
|
||||
},
|
||||
"wait": {
|
||||
"type": 5,
|
||||
"metric_type": "gauge",
|
||||
"value_type": "real-integer-pair",
|
||||
"description": "Waiting latency",
|
||||
"nick": ""
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Dump
|
||||
|
677
ceph/doc/dev/perf_histograms.rst
Normal file
@ -0,0 +1,677 @@
|
||||
=================
|
||||
Perf histograms
|
||||
=================
|
||||
|
||||
The perf histograms build on the perf counters infrastructure. Histograms are built for a number of counters and simplify gathering data on which groups of counter values occur most often over time.
Perf histograms are currently unsigned 64-bit integer counters, so they're mostly useful for times and sizes. Data dumped by a perf histogram can then be fed into other analysis tools/scripts.
|
||||
|
||||
Access
|
||||
------
|
||||
|
||||
The perf histogram data are accessed via the admin socket. For example::
|
||||
|
||||
ceph daemon osd.0 perf histogram schema
|
||||
ceph daemon osd.0 perf histogram dump
|
||||
|
||||
|
||||
Collections
|
||||
-----------
|
||||
|
||||
The histograms are grouped into named collections, normally representing a subsystem or an instance of a subsystem. For example, the ``osd`` collection reports operation latency and size histograms, named something like::
|
||||
|
||||
|
||||
op_r_latency_out_bytes_histogram
|
||||
op_rw_latency_in_bytes_histogram
|
||||
op_rw_latency_out_bytes_histogram
|
||||
...
|
||||
|
||||
|
||||
Schema
|
||||
------
|
||||
|
||||
The ``perf histogram schema`` command dumps a json description of which values are available, and what their type is. Each named value has a ``type`` bitfield, with the 5th bit always set and the following bits defined.
|
||||
|
||||
+------+-------------------------------------+
|
||||
| bit | meaning |
|
||||
+======+=====================================+
|
||||
| 1 | floating point value |
|
||||
+------+-------------------------------------+
|
||||
| 2 | unsigned 64-bit integer value |
|
||||
+------+-------------------------------------+
|
||||
| 4 | average (sum + count pair) |
|
||||
+------+-------------------------------------+
|
||||
| 8 | counter (vs gauge) |
|
||||
+------+-------------------------------------+
|
||||
|
||||
In other words, a histogram of type "18" is a histogram of unsigned 64-bit integer values (16 + 2).
|
||||
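A one-line check of that arithmetic (sketch)::

    HISTOGRAM = 16  # the always-set 5th bit
    UINT64 = 2      # unsigned 64-bit integer values
    assert HISTOGRAM | UINT64 == 18  # the "type": 18 seen below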
|
||||
Here is an example of the schema output::
|
||||
|
||||
{
|
||||
"AsyncMessenger::Worker-0": {},
|
||||
"AsyncMessenger::Worker-1": {},
|
||||
"AsyncMessenger::Worker-2": {},
|
||||
"mutex-WBThrottle::lock": {},
|
||||
"objecter": {},
|
||||
"osd": {
|
||||
"op_r_latency_out_bytes_histogram": {
|
||||
"type": 18,
|
||||
"description": "Histogram of operation latency (including queue time) + data read",
|
||||
"nick": ""
|
||||
},
|
||||
"op_w_latency_in_bytes_histogram": {
|
||||
"type": 18,
|
||||
"description": "Histogram of operation latency (including queue time) + data written",
|
||||
"nick": ""
|
||||
},
|
||||
"op_rw_latency_in_bytes_histogram": {
|
||||
"type": 18,
|
||||
"description": "Histogram of rw operation latency (including queue time) + data written",
|
||||
"nick": ""
|
||||
},
|
||||
"op_rw_latency_out_bytes_histogram": {
|
||||
"type": 18,
|
||||
"description": "Histogram of rw operation latency (including queue time) + data read",
|
||||
"nick": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Dump
|
||||
----
|
||||
|
||||
The actual dump is similar to the schema, except that there are actual value groups. For example::
|
||||
|
||||
"osd": {
|
||||
"op_r_latency_out_bytes_histogram": {
|
||||
"axes": [
|
||||
{
|
||||
"name": "Latency (usec)",
|
||||
"min": 0,
|
||||
"quant_size": 100000,
|
||||
"buckets": 32,
|
||||
"scale_type": "log2",
|
||||
"ranges": [
|
||||
{
|
||||
"max": -1
|
||||
},
|
||||
{
|
||||
"min": 0,
|
||||
"max": 99999
|
||||
},
|
||||
{
|
||||
"min": 100000,
|
||||
"max": 199999
|
||||
},
|
||||
{
|
||||
"min": 200000,
|
||||
"max": 399999
|
||||
},
|
||||
{
|
||||
"min": 400000,
|
||||
"max": 799999
|
||||
},
|
||||
{
|
||||
"min": 800000,
|
||||
"max": 1599999
|
||||
},
|
||||
{
|
||||
"min": 1600000,
|
||||
"max": 3199999
|
||||
},
|
||||
{
|
||||
"min": 3200000,
|
||||
"max": 6399999
|
||||
},
|
||||
{
|
||||
"min": 6400000,
|
||||
"max": 12799999
|
||||
},
|
||||
{
|
||||
"min": 12800000,
|
||||
"max": 25599999
|
||||
},
|
||||
{
|
||||
"min": 25600000,
|
||||
"max": 51199999
|
||||
},
|
||||
{
|
||||
"min": 51200000,
|
||||
"max": 102399999
|
||||
},
|
||||
{
|
||||
"min": 102400000,
|
||||
"max": 204799999
|
||||
},
|
||||
{
|
||||
"min": 204800000,
|
||||
"max": 409599999
|
||||
},
|
||||
{
|
||||
"min": 409600000,
|
||||
"max": 819199999
|
||||
},
|
||||
{
|
||||
"min": 819200000,
|
||||
"max": 1638399999
|
||||
},
|
||||
{
|
||||
"min": 1638400000,
|
||||
"max": 3276799999
|
||||
},
|
||||
{
|
||||
"min": 3276800000,
|
||||
"max": 6553599999
|
||||
},
|
||||
{
|
||||
"min": 6553600000,
|
||||
"max": 13107199999
|
||||
},
|
||||
{
|
||||
"min": 13107200000,
|
||||
"max": 26214399999
|
||||
},
|
||||
{
|
||||
"min": 26214400000,
|
||||
"max": 52428799999
|
||||
},
|
||||
{
|
||||
"min": 52428800000,
|
||||
"max": 104857599999
|
||||
},
|
||||
{
|
||||
"min": 104857600000,
|
||||
"max": 209715199999
|
||||
},
|
||||
{
|
||||
"min": 209715200000,
|
||||
"max": 419430399999
|
||||
},
|
||||
{
|
||||
"min": 419430400000,
|
||||
"max": 838860799999
|
||||
},
|
||||
{
|
||||
"min": 838860800000,
|
||||
"max": 1677721599999
|
||||
},
|
||||
{
|
||||
"min": 1677721600000,
|
||||
"max": 3355443199999
|
||||
},
|
||||
{
|
||||
"min": 3355443200000,
|
||||
"max": 6710886399999
|
||||
},
|
||||
{
|
||||
"min": 6710886400000,
|
||||
"max": 13421772799999
|
||||
},
|
||||
{
|
||||
"min": 13421772800000,
|
||||
"max": 26843545599999
|
||||
},
|
||||
{
|
||||
"min": 26843545600000,
|
||||
"max": 53687091199999
|
||||
},
|
||||
{
|
||||
"min": 53687091200000
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Request size (bytes)",
|
||||
"min": 0,
|
||||
"quant_size": 512,
|
||||
"buckets": 32,
|
||||
"scale_type": "log2",
|
||||
"ranges": [
|
||||
{
|
||||
"max": -1
|
||||
},
|
||||
{
|
||||
"min": 0,
|
||||
"max": 511
|
||||
},
|
||||
{
|
||||
"min": 512,
|
||||
"max": 1023
|
||||
},
|
||||
{
|
||||
"min": 1024,
|
||||
"max": 2047
|
||||
},
|
||||
{
|
||||
"min": 2048,
|
||||
"max": 4095
|
||||
},
|
||||
{
|
||||
"min": 4096,
|
||||
"max": 8191
|
||||
},
|
||||
{
|
||||
"min": 8192,
|
||||
"max": 16383
|
||||
},
|
||||
{
|
||||
"min": 16384,
|
||||
"max": 32767
|
||||
},
|
||||
{
|
||||
"min": 32768,
|
||||
"max": 65535
|
||||
},
|
||||
{
|
||||
"min": 65536,
|
||||
"max": 131071
|
||||
},
|
||||
{
|
||||
"min": 131072,
|
||||
"max": 262143
|
||||
},
|
||||
{
|
||||
"min": 262144,
|
||||
"max": 524287
|
||||
},
|
||||
{
|
||||
"min": 524288,
|
||||
"max": 1048575
|
||||
},
|
||||
{
|
||||
"min": 1048576,
|
||||
"max": 2097151
|
||||
},
|
||||
{
|
||||
"min": 2097152,
|
||||
"max": 4194303
|
||||
},
|
||||
{
|
||||
"min": 4194304,
|
||||
"max": 8388607
|
||||
},
|
||||
{
|
||||
"min": 8388608,
|
||||
"max": 16777215
|
||||
},
|
||||
{
|
||||
"min": 16777216,
|
||||
"max": 33554431
|
||||
},
|
||||
{
|
||||
"min": 33554432,
|
||||
"max": 67108863
|
||||
},
|
||||
{
|
||||
"min": 67108864,
|
||||
"max": 134217727
|
||||
},
|
||||
{
|
||||
"min": 134217728,
|
||||
"max": 268435455
|
||||
},
|
||||
{
|
||||
"min": 268435456,
|
||||
"max": 536870911
|
||||
},
|
||||
{
|
||||
"min": 536870912,
|
||||
"max": 1073741823
|
||||
},
|
||||
{
|
||||
"min": 1073741824,
|
||||
"max": 2147483647
|
||||
},
|
||||
{
|
||||
"min": 2147483648,
|
||||
"max": 4294967295
|
||||
},
|
||||
{
|
||||
"min": 4294967296,
|
||||
"max": 8589934591
|
||||
},
|
||||
{
|
||||
"min": 8589934592,
|
||||
"max": 17179869183
|
||||
},
|
||||
{
|
||||
"min": 17179869184,
|
||||
"max": 34359738367
|
||||
},
|
||||
{
|
||||
"min": 34359738368,
|
||||
"max": 68719476735
|
||||
},
|
||||
{
|
||||
"min": 68719476736,
|
||||
"max": 137438953471
|
||||
},
|
||||
{
|
||||
"min": 137438953472,
|
||||
"max": 274877906943
|
||||
},
|
||||
{
|
||||
"min": 274877906944
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"values": [
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
[
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
]
|
||||
]
|
||||
}
|
||||
},
|
||||
|
||||
This represents a 2D histogram, here consisting of 9 history entries with 32 value groups each.
The "ranges" element gives the value bounds for each value group. "buckets" is the number of value groups,
"min" is the minimum accepted value, "quant_size" is the quantization unit, and "scale_type" is either "log2" (logarithmic
scale) or "linear" (linear scale).
You can use the histogram_dump.py tool (see src/tools/histogram_dump.py) for quick visualisation of existing histogram
data.
|
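The bucket bounds in the dump can be reproduced from the schema fields
alone; the following sketch (a hypothetical helper, distinct from
histogram_dump.py) regenerates the ``ranges`` list for a log2 axis::

    # Sketch: rebuild "ranges" from min, quant_size and buckets (log2 scale).
    def log2_ranges(min_value, quant_size, buckets):
        ranges = [{"max": min_value - 1}]      # bucket 0: below the minimum
        lo, width = min_value, quant_size
        for _ in range(buckets - 2):           # the finite buckets
            ranges.append({"min": lo, "max": lo + width - 1})
            lo += width
            if len(ranges) > 2:                # width doubles after the
                width *= 2                     # second finite bucket
        ranges.append({"min": lo})             # last bucket: open-ended
        return ranges

    # Matches the latency axis above (min=0, quant_size=100000, buckets=32):
    assert log2_ranges(0, 100000, 32)[3] == {"min": 200000, "max": 399999}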
@ -45,12 +45,11 @@ is the primary and the rest are replicas.
|
||||
Many PGs can map to one OSD.
|
||||
|
||||
A PG represents nothing but a grouping of objects; you configure the
|
||||
number of PGs you want (see
|
||||
http://ceph.com/wiki/Changing_the_number_of_PGs ), number of
|
||||
OSDs * 100 is a good starting point, and all of your stored objects
|
||||
are pseudo-randomly evenly distributed to the PGs. So a PG explicitly
|
||||
does NOT represent a fixed amount of storage; it represents 1/pg_num
|
||||
'th of the storage you happen to have on your OSDs.
|
||||
number of PGs you want; the number of OSDs * 100 is a good starting point,
and all of your stored objects are pseudo-randomly evenly distributed
|
||||
to the PGs. So a PG explicitly does NOT represent a fixed amount of
|
||||
storage; it represents 1/pg_num'th of the storage you happen to have
|
||||
on your OSDs.
|
||||
|
||||
Ignoring the finer points of CRUSH and custom placement, it goes
|
||||
something like this in pseudocode::
|
||||
|
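The pseudocode block itself falls outside this hunk; as a rough Python
rendering of the mapping just described (a sketch that ignores CRUSH
details, with a made-up object name)::

    # Sketch: stable pseudo-random object -> PG mapping. CRUSH then maps
    # each PG to an ordered list of OSDs; that step is elided here.
    import zlib

    pg_num = 1024  # e.g. ~100 PGs per OSD, rounded to a power of two

    def object_to_pg(name):
        return zlib.crc32(name.encode()) % pg_num

    print(object_to_pg("rbd_data.1234"))  # same PG every time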
@ -106,12 +106,13 @@ you might do something like this:
|
||||
|
||||
Running a RadosGW development environment
|
||||
-----------------------------------------
|
||||
Add the ``-r`` to vstart.sh to enable the RadosGW
|
||||
|
||||
Set the ``RGW`` environment variable when running vstart.sh to enable the RadosGW.
|
||||
|
||||
.. code::
|
||||
|
||||
$ cd build
|
||||
$ ../src/vstart.sh -d -n -x -r
|
||||
$ RGW=1 ../src/vstart.sh -d -n -x
|
||||
|
||||
You can now use the swift python client to communicate with the RadosGW.
|
||||
|
||||
|
@ -28,9 +28,8 @@ Ceph is built using cmake. To build Ceph, navigate to your cloned Ceph
|
||||
repository and execute the following::
|
||||
|
||||
cd ceph
|
||||
mkdir build
|
||||
./do_cmake.sh
|
||||
cd build
|
||||
cmake ..
|
||||
make
|
||||
|
||||
.. topic:: Hyperthreading
|
||||
|
@ -107,9 +107,7 @@ You may find releases for CentOS/RHEL and others (installed with YUM) at::
|
||||
|
||||
https://download.ceph.com/rpm-{release-name}
|
||||
|
||||
The major releases of Ceph are summarized at:
|
||||
|
||||
http://docs.ceph.com/docs/master/releases/
|
||||
The major releases of Ceph are summarized at: :doc:`/releases`.
|
||||
|
||||
Every second major release is considered Long Term Stable (LTS). Critical
|
||||
bugfixes are backported to LTS releases until their retirement. Since retired
|
||||
|
@ -21,7 +21,7 @@ repository and build Ceph yourself.
|
||||
Get Tarballs <get-tarballs>
|
||||
Clone Source <clone-source>
|
||||
Build Ceph <build-ceph>
|
||||
Ceph Mirrors <mirrors>
|
||||
Ceph Mirrors <mirrors>
|
||||
|
||||
|
||||
Install Software
|
||||
@ -38,7 +38,7 @@ QEMU.
|
||||
:maxdepth: 1
|
||||
|
||||
Install ceph-deploy <install-ceph-deploy>
|
||||
Install Ceph Storage Cluster <install-storage-cluster>
|
||||
Install Ceph Storage Cluster <install-storage-cluster>
|
||||
Install Ceph Object Gateway <install-ceph-gateway>
|
||||
Install Virtualization for Block <install-vm-cloud>
|
||||
|
||||
@ -52,7 +52,8 @@ deployment scripts with Chef, Juju, Puppet, etc.
|
||||
|
||||
.. toctree::
|
||||
|
||||
Manual Deployment <manual-deployment>
|
||||
Manual Deployment <manual-deployment>
|
||||
Manual Deployment on FreeBSD <manual-freebsd-deployment>
|
||||
|
||||
Upgrade Software
|
||||
================
|
||||
|
@ -58,7 +58,7 @@ a number of things:
|
||||
For example, when you run multiple clusters in a `federated architecture`_,
|
||||
the cluster name (e.g., ``us-west``, ``us-east``) identifies the cluster for
|
||||
the current CLI session. **Note:** To identify the cluster name on the
|
||||
command line interface, specify the a Ceph configuration file with the
|
||||
command line interface, specify the Ceph configuration file with the
|
||||
cluster name (e.g., ``ceph.conf``, ``us-west.conf``, ``us-east.conf``, etc.).
|
||||
Also see CLI usage (``ceph --cluster {cluster-name}``).
|
||||
|
||||
@ -290,6 +290,12 @@ The procedure is as follows:
|
||||
**Note:** Once you add OSDs and start them, the placement group health errors
|
||||
should disappear. See the next section for details.
|
||||
|
||||
Manager daemon configuration
|
||||
============================
|
||||
|
||||
On each node where you run a ceph-mon daemon, you should also set up a ceph-mgr daemon.
|
||||
|
||||
See `../mgr/administrator`_
|
||||
|
||||
Adding OSDs
|
||||
===========
|
||||
|
624
ceph/doc/install/manual-freebsd-deployment.rst
Normal file
@ -0,0 +1,624 @@
|
||||
==============================
|
||||
Manual Deployment on FreeBSD
|
||||
==============================
|
||||
|
||||
This is largely a copy of the regular Manual Deployment, with FreeBSD specifics.
The difference lies in two parts: the underlying disk format, and the way to use
the tools.
|
||||
|
||||
All Ceph clusters require at least one monitor, and at least as many OSDs as
|
||||
copies of an object stored on the cluster. Bootstrapping the initial monitor(s)
|
||||
is the first step in deploying a Ceph Storage Cluster. Monitor deployment also
|
||||
sets important criteria for the entire cluster, such as the number of replicas
|
||||
for pools, the number of placement groups per OSD, the heartbeat intervals,
|
||||
whether authentication is required, etc. Most of these values are set by
|
||||
default, so it's useful to know about them when setting up your cluster for
|
||||
production.
|
||||
|
||||
Following the same configuration as `Installation (Quick)`_, we will set up a
|
||||
cluster with ``node1`` as the monitor node, and ``node2`` and ``node3`` for
|
||||
OSD nodes.
|
||||
|
||||
|
||||
|
||||
.. ditaa::
|
||||
/------------------\ /----------------\
|
||||
| Admin Node | | node1 |
|
||||
| +-------->+ |
|
||||
| | | cCCC |
|
||||
\---------+--------/ \----------------/
|
||||
|
|
||||
| /----------------\
|
||||
| | node2 |
|
||||
+----------------->+ |
|
||||
| | cCCC |
|
||||
| \----------------/
|
||||
|
|
||||
| /----------------\
|
||||
| | node3 |
|
||||
+----------------->| |
|
||||
| cCCC |
|
||||
\----------------/
|
||||
|
||||
|
||||
|
||||
Disklayout on FreeBSD
|
||||
=====================
|
||||
|
||||
The current implementation works on ZFS pools:
|
||||
|
||||
* All Ceph data is created in /var/lib/ceph
|
||||
* Log files go into /var/log/ceph
|
||||
* PID files go into /var/run
|
||||
* One ZFS pool is allocated per OSD, like::
|
||||
|
||||
gpart create -s GPT ada1
|
||||
gpart add -t freebsd-zfs -l osd1 ada1
|
||||
zpool create -m /var/lib/ceph/osd/osd.1 osd1 gpt/osd1
|
||||
|
||||
* Some cache and log (ZIL) can be attached.
|
||||
Please note that this is different from the Ceph journals. Cache and log are
totally transparent to Ceph, and help the filesystem keep the system
consistent and improve performance.
|
||||
Assuming that ada2 is an SSD::
|
||||
|
||||
gpart create -s GPT ada2
|
||||
gpart add -t freebsd-zfs -l osd1-log -s 1G ada2
|
||||
zpool add osd1 log gpt/osd1-log
|
||||
gpart add -t freebsd-zfs -l osd1-cache -s 10G ada2
|
||||
zpool add osd1 cache gpt/osd1-cache
|
||||
|
||||
* Note: *UFS2 does not allow large xattrs*
|
||||
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
By FreeBSD convention, extra software goes into ``/usr/local/``, which
means that the default location for ``ceph.conf`` is
``/usr/local/etc/ceph/ceph.conf``. The smartest thing to do is to create a softlink
from ``/etc/ceph`` to ``/usr/local/etc/ceph``::
|
||||
|
||||
ln -s /usr/local/etc/ceph /etc/ceph
|
||||
|
||||
A sample file is provided in ``/usr/local/share/doc/ceph/sample.ceph.conf``.
Note that ``/usr/local/etc/ceph/ceph.conf`` will be found by most tools;
linking it to ``/etc/ceph/ceph.conf`` will help with any scripts found
in extra tools, scripts, and/or discussion lists.
|
||||
|
||||
Monitor Bootstrapping
|
||||
=====================
|
||||
|
||||
Bootstrapping a monitor (a Ceph Storage Cluster, in theory) requires
|
||||
a number of things:
|
||||
|
||||
- **Unique Identifier:** The ``fsid`` is a unique identifier for the cluster,
|
||||
and stands for File System ID from the days when the Ceph Storage Cluster was
|
||||
principally for the Ceph Filesystem. Ceph now supports native interfaces,
|
||||
block devices, and object storage gateway interfaces too, so ``fsid`` is a
|
||||
bit of a misnomer.
|
||||
|
||||
- **Cluster Name:** Ceph clusters have a cluster name, which is a simple string
|
||||
without spaces. The default cluster name is ``ceph``, but you may specify
|
||||
a different cluster name. Overriding the default cluster name is
|
||||
especially useful when you are working with multiple clusters and you need to
|
||||
clearly understand which cluster you are working with.
|
||||
|
||||
For example, when you run multiple clusters in a `federated architecture`_,
|
||||
the cluster name (e.g., ``us-west``, ``us-east``) identifies the cluster for
|
||||
the current CLI session. **Note:** To identify the cluster name on the
|
||||
command line interface, specify the Ceph configuration file with the
|
||||
cluster name (e.g., ``ceph.conf``, ``us-west.conf``, ``us-east.conf``, etc.).
|
||||
Also see CLI usage (``ceph --cluster {cluster-name}``).
|
||||
|
||||
- **Monitor Name:** Each monitor instance within a cluster has a unique name.
|
||||
In common practice, the Ceph Monitor name is the host name (we recommend one
|
||||
Ceph Monitor per host, and no commingling of Ceph OSD Daemons with
|
||||
Ceph Monitors). You may retrieve the short hostname with ``hostname -s``.
|
||||
|
||||
- **Monitor Map:** Bootstrapping the initial monitor(s) requires you to
|
||||
generate a monitor map. The monitor map requires the ``fsid``, the cluster
|
||||
name (or uses the default), and at least one host name and its IP address.
|
||||
|
||||
- **Monitor Keyring**: Monitors communicate with each other via a
|
||||
secret key. You must generate a keyring with a monitor secret and provide
|
||||
it when bootstrapping the initial monitor(s).
|
||||
|
||||
- **Administrator Keyring**: To use the ``ceph`` CLI tools, you must have
|
||||
a ``client.admin`` user. So you must generate the admin user and keyring,
|
||||
and you must also add the ``client.admin`` user to the monitor keyring.
|
||||
|
||||
The foregoing requirements do not imply the creation of a Ceph Configuration
|
||||
file. However, as a best practice, we recommend creating a Ceph configuration
|
||||
file and populating it with the ``fsid``, the ``mon initial members`` and the
|
||||
``mon host`` settings.
|
||||
|
||||
You can get and set all of the monitor settings at runtime as well. However,
|
||||
a Ceph Configuration file may contain only those settings that override the
|
||||
default values. When you add settings to a Ceph configuration file, these
|
||||
settings override the default settings. Maintaining those settings in a
|
||||
Ceph configuration file makes it easier to maintain your cluster.
|
||||
|
||||
The procedure is as follows:
|
||||
|
||||
|
||||
#. Log in to the initial monitor node(s)::
|
||||
|
||||
ssh {hostname}
|
||||
|
||||
For example::
|
||||
|
||||
ssh node1
|
||||
|
||||
|
||||
#. Ensure you have a directory for the Ceph configuration file. By default,
|
||||
Ceph uses ``/etc/ceph``. When you install ``ceph``, the installer will
|
||||
create the ``/etc/ceph`` directory automatically. ::
|
||||
|
||||
ls /etc/ceph
|
||||
|
||||
**Note:** Deployment tools may remove this directory when purging a
|
||||
cluster (e.g., ``ceph-deploy purgedata {node-name}``, ``ceph-deploy purge
|
||||
{node-name}``).
|
||||
|
||||
#. Create a Ceph configuration file. By default, Ceph uses
|
||||
``ceph.conf``, where ``ceph`` reflects the cluster name. ::
|
||||
|
||||
sudo vim /etc/ceph/ceph.conf
|
||||
|
||||
|
||||
#. Generate a unique ID (i.e., ``fsid``) for your cluster. ::

      uuidgen

#. Add the unique ID to your Ceph configuration file. ::

      fsid = {UUID}

   For example::

      fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993

#. Add the initial monitor(s) to your Ceph configuration file. ::

      mon initial members = {hostname}[,{hostname}]

   For example::

      mon initial members = node1

#. Add the IP address(es) of the initial monitor(s) to your Ceph configuration
   file and save the file. ::

      mon host = {ip-address}[,{ip-address}]

   For example::

      mon host = 192.168.0.1

   **Note:** You may use IPv6 addresses instead of IPv4 addresses, but
   you must set ``ms bind ipv6`` to ``true``. See `Network Configuration
   Reference`_ for details about network configuration.

#. Create a keyring for your cluster and generate a monitor secret key. ::

      ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
#. Generate an administrator keyring, generate a ``client.admin`` user and add
   the user to the keyring. ::

      sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'

#. Add the ``client.admin`` key to the ``ceph.mon.keyring``. ::

      ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring

#. Generate a monitor map using the hostname(s), host IP address(es) and the FSID.
   Save it as ``/tmp/monmap``::

      monmaptool --create --add {hostname} {ip-address} --fsid {uuid} /tmp/monmap

   For example::

      monmaptool --create --add node1 192.168.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
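   If you want to double-check the map you just generated, you can print its
   contents with ``monmaptool`` (a sanity check, not a required step)::

      monmaptool --print /tmp/monmap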
#. Create a default data directory (or directories) on the monitor host(s). ::

      sudo mkdir /var/lib/ceph/mon/{cluster-name}-{hostname}

   For example::

      sudo mkdir /var/lib/ceph/mon/ceph-node1

   See `Monitor Config Reference - Data`_ for details.

#. Populate the monitor daemon(s) with the monitor map and keyring. ::

      sudo -u ceph ceph-mon [--cluster {cluster-name}] --mkfs -i {hostname} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

   For example::

      sudo -u ceph ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#. Consider settings for a Ceph configuration file. Common settings include
   the following::

      [global]
      fsid = {cluster-id}
      mon initial members = {hostname}[, {hostname}]
      mon host = {ip-address}[, {ip-address}]
      public network = {network}[, {network}]
      cluster network = {network}[, {network}]
      auth cluster required = cephx
      auth service required = cephx
      auth client required = cephx
      osd journal size = {n}
      osd pool default size = {n}  # Write an object n times.
      osd pool default min size = {n} # Allow writing n copies in a degraded state.
      osd pool default pg num = {n}
      osd pool default pgp num = {n}
      osd crush chooseleaf type = {n}

   In the foregoing example, the ``[global]`` section of the configuration might
   look like this::

      [global]
      fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
      mon initial members = node1
      mon host = 192.168.0.1
      public network = 192.168.0.0/24
      auth cluster required = cephx
      auth service required = cephx
      auth client required = cephx
      osd journal size = 1024
      osd pool default size = 2
      osd pool default min size = 1
      osd pool default pg num = 333
      osd pool default pgp num = 333
      osd crush chooseleaf type = 1
#. Touch the ``done`` file.

   Mark that the monitor is created and ready to be started::

      sudo touch /var/lib/ceph/mon/ceph-node1/done

#. For FreeBSD, an entry for every monitor needs to be added to the
   configuration file. (This requirement will be removed in future releases.)

   The entry should look like::

      [mon]
      [mon.node1]
      host = node1  # this name must be resolvable
#. Start the monitor(s).

   For Ubuntu, use Upstart::

      sudo start ceph-mon id=node1 [cluster={cluster-name}]

   In this case, to allow the start of the daemon at each reboot you
   must create an empty file like this::

      sudo touch /var/lib/ceph/mon/{cluster-name}-{hostname}/upstart

   For example::

      sudo touch /var/lib/ceph/mon/ceph-node1/upstart

   For Debian/CentOS/RHEL, use sysvinit::

      sudo /etc/init.d/ceph start mon.node1

   For FreeBSD we use the rc.d init scripts (called bsdrc in Ceph)::

      sudo service ceph start mon.node1

   For this to work, ``/etc/rc.conf`` also needs an entry to enable ceph::

      echo 'ceph_enable="YES"' >> /etc/rc.conf
#. Verify that Ceph created the default pools. ::

      ceph osd lspools

   You should see output like this::

      0 data,1 metadata,2 rbd,

#. Verify that the monitor is running. ::

      ceph -s

   You should see output that the monitor you started is up and running, and
   you should see a health error indicating that placement groups are stuck
   inactive. It should look something like this::

      cluster a7f64266-0894-4f1e-a635-d0aeaca0e993
        health HEALTH_ERR 192 pgs stuck inactive; 192 pgs stuck unclean; no osds
        monmap e1: 1 mons at {node1=192.168.0.1:6789/0}, election epoch 1, quorum 0 node1
        osdmap e1: 0 osds: 0 up, 0 in
        pgmap v2: 192 pgs, 3 pools, 0 bytes data, 0 objects
          0 kB used, 0 kB / 0 kB avail
          192 creating

   **Note:** Once you add OSDs and start them, the placement group health errors
   should disappear. See the next section for details.
Adding OSDs
===========

Once you have your initial monitor(s) running, you should add OSDs. Your cluster
cannot reach an ``active + clean`` state until you have enough OSDs to handle the
number of copies of an object (e.g., ``osd pool default size = 2`` requires at
least two OSDs). After bootstrapping your monitor, your cluster has a default
CRUSH map; however, the CRUSH map doesn't have any Ceph OSD Daemons mapped to
a Ceph Node.


Short Form
----------

Ceph provides the ``ceph-disk`` utility, which can prepare a disk, partition or
directory for use with Ceph. The ``ceph-disk`` utility creates the OSD ID by
incrementing the index. Additionally, ``ceph-disk`` will add the new OSD to the
CRUSH map under the host for you. Execute ``ceph-disk -h`` for CLI details.
The ``ceph-disk`` utility automates the steps of the `Long Form`_ below. To
create the first two OSDs with the short form procedure, execute the following
on ``node2`` and ``node3``:
#. Prepare the OSD. On FreeBSD, only existing directories can be used to
   create OSDs in. ::

      ssh {node-name}
      sudo ceph-disk prepare --cluster {cluster-name} --cluster-uuid {uuid} {path-to-ceph-osd-directory}

   For example::

      ssh node1
      sudo ceph-disk prepare --cluster ceph --cluster-uuid a7f64266-0894-4f1e-a635-d0aeaca0e993 /var/lib/ceph/osd/osd.1
#. Activate the OSD::

      sudo ceph-disk activate {data-path} [--activate-key {path}]

   For example::

      sudo ceph-disk activate /var/lib/ceph/osd/osd.1

   **Note:** Use the ``--activate-key`` argument if you do not have a copy
   of ``/var/lib/ceph/bootstrap-osd/{cluster}.keyring`` on the Ceph Node.

   FreeBSD does not auto start the OSDs, and also requires an entry in
   ``ceph.conf`` for each OSD::

      [osd]
      [osd.1]
      host = node1  # this name must be resolvable
Long Form
---------

Without the benefit of any helper utilities, create an OSD and add it to the
cluster and CRUSH map with the following procedure. To create the first two
OSDs with the long form procedure, execute the following on ``node2`` and
``node3``:

#. Connect to the OSD host. ::

      ssh {node-name}

#. Generate a UUID for the OSD. ::

      uuidgen
#. Create the OSD. If no UUID is given, it will be set automatically when the
   OSD starts up. The following command will output the OSD number, which you
   will need for subsequent steps. ::

      ceph osd create [{uuid} [{id}]]

#. Create the default directory on your new OSD. ::

      ssh {new-osd-host}
      sudo mkdir /var/lib/ceph/osd/{cluster-name}-{osd-number}

   Above are the ZFS instructions to do this for FreeBSD.
#. If the OSD is for a drive other than the OS drive, prepare it
   for use with Ceph, and mount it to the directory you just created.
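   For example, on Linux you might put an XFS filesystem on the new drive and
   mount it at the OSD's data directory (the device name ``/dev/sdb1`` is
   illustrative; substitute your own, and FreeBSD setups would use ZFS
   instead)::

      sudo mkfs -t xfs /dev/sdb1
      sudo mount -o noatime /dev/sdb1 /var/lib/ceph/osd/{cluster-name}-{osd-number}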
#. Initialize the OSD data directory. ::

      ssh {new-osd-host}
      sudo ceph-osd -i {osd-num} --mkfs --mkkey --osd-uuid [{uuid}]

   The directory must be empty before you can run ``ceph-osd`` with the
   ``--mkkey`` option. In addition, if your cluster uses a custom name, the
   ``ceph-osd`` tool requires it to be specified with the ``--cluster`` option.

#. Register the OSD authentication key. The value of ``ceph`` for
   ``ceph-{osd-num}`` in the path is the ``$cluster-$id``. If your
   cluster name differs from ``ceph``, use your cluster name instead. ::

      sudo ceph auth add osd.{osd-num} osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/{cluster-name}-{osd-num}/keyring
#. Add your Ceph Node to the CRUSH map. ::

      ceph [--cluster {cluster-name}] osd crush add-bucket {hostname} host

   For example::

      ceph osd crush add-bucket node1 host

#. Place the Ceph Node under the root ``default``. ::

      ceph osd crush move node1 root=default

#. Add the OSD to the CRUSH map so that it can begin receiving data. You may
   also decompile the CRUSH map, add the OSD to the device list, add the host as a
   bucket (if it's not already in the CRUSH map), add the device as an item in the
   host, assign it a weight, recompile it and set it. ::

      ceph [--cluster {cluster-name}] osd crush add {id-or-name} {weight} [{bucket-type}={bucket-name} ...]

   For example::

      ceph osd crush add osd.0 1.0 host=node1
#. After you add an OSD to Ceph, the OSD is in your configuration. However,
   it is not yet running. The OSD is ``down`` and ``in``. You must start
   your new OSD before it can begin receiving data.

   For Ubuntu, use Upstart::

      sudo start ceph-osd id={osd-num} [cluster={cluster-name}]

   For example::

      sudo start ceph-osd id=0
      sudo start ceph-osd id=1

   For Debian/CentOS/RHEL, use sysvinit::

      sudo /etc/init.d/ceph start osd.{osd-num} [--cluster {cluster-name}]

   For example::

      sudo /etc/init.d/ceph start osd.0
      sudo /etc/init.d/ceph start osd.1

   In this case, to allow the start of the daemon at each reboot you
   must create an empty file like this::

      sudo touch /var/lib/ceph/osd/{cluster-name}-{osd-num}/sysvinit

   For example::

      sudo touch /var/lib/ceph/osd/ceph-0/sysvinit
      sudo touch /var/lib/ceph/osd/ceph-1/sysvinit

   Once you start your OSD, it is ``up`` and ``in``.

   For FreeBSD, use the rc.d init scripts after adding the OSD to
   ``ceph.conf``::

      sudo service ceph start osd.{osd-num}

   For example::

      sudo service ceph start osd.0
      sudo service ceph start osd.1

   In this case, to allow the start of the daemon at each reboot you
   must create an empty file like this::

      sudo touch /var/lib/ceph/osd/{cluster-name}-{osd-num}/bsdrc

   For example::

      sudo touch /var/lib/ceph/osd/ceph-0/bsdrc
      sudo touch /var/lib/ceph/osd/ceph-1/bsdrc

   Once you start your OSD, it is ``up`` and ``in``.
Adding MDS
==========

In the below instructions, ``{id}`` is an arbitrary name, such as the hostname of the machine.

#. Create the mds data directory::

      mkdir -p /var/lib/ceph/mds/{cluster-name}-{id}

#. Create a keyring::

      ceph-authtool --create-keyring /var/lib/ceph/mds/{cluster-name}-{id}/keyring --gen-key -n mds.{id}

#. Import the keyring and set caps::

      ceph auth add mds.{id} osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/{cluster}-{id}/keyring

#. Add to ceph.conf::

      [mds.{id}]
      host = {id}

#. Start the daemon the manual way::

      ceph-mds --cluster {cluster-name} -i {id} -m {mon-hostname}:{mon-port} [-f]

#. Start the daemon the right way (using a ceph.conf entry)::

      service ceph start

#. If starting the daemon fails with this error::

      mds.-1.0 ERROR: failed to authenticate: (22) Invalid argument

   then make sure you do not have a keyring set in ceph.conf in the global
   section; move it to the client section, or add a keyring setting specific
   to this mds daemon. Also verify that you see the same key in the mds data
   directory and in the output of ``ceph auth get mds.{id}``.

#. Now you are ready to `create a Ceph filesystem`_.
Summary
=======

Once you have your monitor and two OSDs up and running, you can watch the
placement groups peer by executing the following::

    ceph -w

To view the tree, execute the following::

    ceph osd tree

You should see output that looks something like this::

    # id    weight  type name       up/down reweight
    -1      2       root default
    -2      2               host node1
    0       1                       osd.0   up      1
    -3      1               host node2
    1       1                       osd.1   up      1

To add (or remove) additional monitors, see `Add/Remove Monitors`_.
To add (or remove) additional Ceph OSD Daemons, see `Add/Remove OSDs`_.


.. _federated architecture: ../../radosgw/federated-config
.. _Installation (Quick): ../../start
.. _Add/Remove Monitors: ../../rados/operations/add-or-rm-mons
.. _Add/Remove OSDs: ../../rados/operations/add-or-rm-osds
.. _Network Configuration Reference: ../../rados/configuration/network-config-ref
.. _Monitor Config Reference - Data: ../../rados/configuration/mon-config-ref#data
.. _create a Ceph filesystem: ../../cephfs/createfs
@ -24,6 +24,7 @@ These mirrors are available on the following locations:

- **UK: UK**: http://uk.ceph.com
- **US-East: US East Coast**: http://us-east.ceph.com/
- **US-West: US West Coast**: http://us-west.ceph.com/
- **CN: China**: http://cn.ceph.com/

You can replace all download.ceph.com URLs with any of the mirrors, for example:
@ -41,6 +41,17 @@ Options

Display additional information for debugging.

Bugs
====

:program:`ceph-detect-init` is used by :program:`ceph-disk` to figure out the
init system to manage the mount directory of an OSD, but only the following
combinations are fully tested:

- `upstart` on `Ubuntu 14.04`
- `systemd` on `Ubuntu 15.04` and up
- `systemd` on `Debian 8` and up
- `systemd` on `RHEL/CentOS 7` and up
- `systemd` on `Fedora 22` and up
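For example, running the tool on a supported host prints the detected init
system, e.g. ``systemd`` on a systemd-based distribution::

    ceph-detect-init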
Availability
============
@ -78,6 +78,11 @@ the subcommands ``deactivate`` and ``destroy``.

The documentation for each subcommand (prepare, activate, etc.) can be displayed
with its ``--help`` option. For instance ``ceph-disk prepare --help``.

Bugs
====

See also the ``Bugs`` section in :doc:`ceph-detect-init <ceph-detect-init>`\(8).

Availability
============

@ -87,5 +92,6 @@ the Ceph documentation at http://ceph.com/docs for more information.

See also
========

:doc:`ceph-detect-init <ceph-detect-init>`\(8),
:doc:`ceph-osd <ceph-osd>`\(8),
:doc:`ceph-deploy <ceph-deploy>`\(8)
@ -10,7 +10,7 @@ Synopsis
========

| **ceph-osd** -i *osdnum* [ --osd-data *datapath* ] [ --osd-journal
  *journal* ] [ --mkfs ] [ --mkjournal ] [ --mkkey ]
  *journal* ] [ --mkfs ] [ --mkjournal ] [--flush-journal] [--check-allows-journal] [--check-wants-journal] [--check-needs-journal] [ --mkkey ]


Description
@ -57,6 +57,18 @@ Options

Journal updates to *journal*.

.. option:: --check-wants-journal

   Check whether a journal is desired.

.. option:: --check-allows-journal

   Check whether a journal is allowed.

.. option:: --check-needs-journal

   Check whether a journal is required.

.. option:: --mkfs

   Create an empty object repository. This also initializes the journal
@ -39,7 +39,7 @@ Synopsis

| **ceph** **mon_status**

| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lspools* \| *map* \| *metadata* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ...
| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lspools* \| *map* \| *metadata* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ...

| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ...
@ -478,10 +478,40 @@ Usage::

Subcommand ``create`` creates new osd (with optional UUID and ID).

This command is DEPRECATED as of the Luminous release, and will be removed in
a future release.

Subcommand ``new`` should instead be used.

Usage::

    ceph osd create {<uuid>} {<id>}

Subcommand ``new`` reuses a previously destroyed OSD *id*. The new OSD will
have the specified *uuid*, and the command expects a JSON file containing
the base64 cephx key for auth entity *client.osd.<id>*, as well as an optional
base64 cephx key for dm-crypt lockbox access and a dm-crypt key. Specifying
a dm-crypt key requires specifying the accompanying lockbox cephx key.

Usage::

    ceph osd new {<id>} {<uuid>} -i {<secrets.json>}

The secrets JSON file is expected to maintain a form of the following format::

    {
        "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg=="
    }

Or::

    {
        "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==",
        "cephx_lockbox_secret": "AQDNCglZuaeVCRAAYr76PzR1Anh7A0jswkODIQ==",
        "dmcrypt_key": "<dm-crypt key>"
    }


Subcommand ``crush`` is used for CRUSH management. It uses some additional
subcommands.
||||
@ -938,6 +968,29 @@ Usage::
|
||||
|
||||
ceph osd rm <ids> [<ids>...]
|
||||
|
||||
Subcommand ``destroy`` marks OSD *id* as *destroyed*, removing its cephx
|
||||
entity's keys and all of its dm-crypt and daemon-private config key
|
||||
entries.
|
||||
|
||||
This command will not remove the OSD from crush, nor will it remove the
|
||||
OSD from the OSD map. Instead, once the command successfully completes,
|
||||
the OSD will show marked as *destroyed*.
|
||||
|
||||
In order to mark an OSD as destroyed, the OSD must first be marked as
|
||||
**lost**.
|
||||
|
||||
Usage::
|
||||
|
||||
ceph osd destroy <id> {--yes-i-really-mean-it}
|
||||
|
||||
|
||||
Subcommand ``purge`` performs a combination of ``osd destroy``,
|
||||
``osd rm`` and ``osd crush remove``.
|
||||
|
||||
Usage::
|
||||
|
||||
ceph osd purge <id> {--yes-i-really-mean-it}
|
||||
|
||||
Subcommand ``scrub`` initiates scrub on specified osd.
|
||||
|
||||
Usage::
|
||||
@ -1265,6 +1318,13 @@ Usage::

    ceph tell <name (type.id)> <args> [<args>...]


List all available commands.

Usage::

    ceph tell <name (type.id)> help

version
-------
@ -138,6 +138,18 @@ Pool specific commands

:command:`cleanup`

:command:`listxattr` *name*
  List all extended attributes of an object.

:command:`getxattr` *name* *attr*
  Dump the extended attribute value of *attr* of an object.

:command:`setxattr` *name* *attr* *value*
  Set the value of *attr* in the extended attributes of an object.

:command:`rmxattr` *name* *attr*
  Remove *attr* from the extended attributes of an object.

:command:`listomapkeys` *name*
  List all the keys stored in the object map of object name.
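For example, to set and then list an extended attribute on an object (the
pool, object, attribute and value names are illustrative)::

    rados -p mypool setxattr myobject myattr somevalue
    rados -p mypool listxattr myobject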
@ -2,12 +2,16 @@
ceph-mgr administrator's guide
==============================

Setup
-----
Manual setup
------------

Create an authentication key for your daemon::
Usually, you would set up a ceph-mgr daemon using a tool such
as ceph-ansible. These instructions describe how to set up
a ceph-mgr daemon manually.

    ceph auth get-or-create mgr.$name mon 'allow *'
First, create an authentication key for your daemon::

    ceph auth get-or-create mgr.$name mon 'allow profile mgr' osd 'allow *' mds 'allow *'

Place that key into ``mgr data`` path, which for a cluster "ceph"
and mgr $name "foo" would be ``/var/lib/ceph/mgr/ceph-foo``.
@ -21,6 +25,15 @@ of ``ceph status``, which should now include a mgr status line::

    mgr active: $name

Client authentication
---------------------

The manager is a new daemon which requires new CephX capabilities. If you upgrade
a cluster from an old version of Ceph, or use the default install/deploy tools,
your admin client should get this capability automatically. If you use tooling from
elsewhere, you may get EACCES errors when invoking certain ceph cluster commands.
To fix that, add a "mgr allow *" stanza to your client's cephx capabilities by
`Modifying User Capabilities`_, as sketched below.
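A minimal sketch of such an update, assuming the client in question is
``client.admin`` and already holds the usual mon/osd/mds caps (note that
``ceph auth caps`` replaces all caps, so re-state the existing ones)::

    ceph auth caps client.admin mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'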
High availability
-----------------
@ -75,9 +88,10 @@ OPTION(mgr_module_path, OPT_STR, CEPH_PKGLIBDIR "/mgr") // where to load python

:Type: String
:Default: ``"/var/lib/ceph/mgr/$cluster-$id"``

``mgr beacon period``
``mgr tick period``

:Description: How many seconds between mgr beacons to monitors
:Description: How many seconds between mgr beacons to monitors, and other
              periodic checks.
:Type: Integer
:Default: ``5``

@ -87,3 +101,4 @@ OPTION(mgr_module_path, OPT_STR, CEPH_PKGLIBDIR "/mgr") // where to load python

:Type: Integer
:Default: ``30``

.. _Modifying User Capabilities: ../rados/operations/user-management#modify-user-capabilities
15
ceph/doc/mgr/dashboard.rst
Normal file
@ -0,0 +1,15 @@
dashboard plugin
================

The dashboard plugin visualizes the statistics of the cluster using a web server
hosted by ``ceph-mgr``. Like most web applications, dashboard binds to a host
name and port. Since each ``ceph-mgr`` hosts its own instance of dashboard, we
need to configure them separately. The hostname and port are stored using the
configuration key facility. So we can configure them like::

    ceph config-key put mgr/dashboard/$name/server_addr $IP
    ceph config-key put mgr/dashboard/$name/server_port $PORT

where ``$name`` is the ID of the ceph-mgr instance that is hosting this dashboard
web app. If they are not configured, the web app will be bound to ``127.0.0.1:7000``.
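For example, to serve the dashboard of a mgr instance named ``x`` on all
interfaces on port 7000 (the instance name, address and port here are
illustrative)::

    ceph config-key put mgr/dashboard/x/server_addr 0.0.0.0
    ceph config-key put mgr/dashboard/x/server_port 7000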
@ -8,9 +8,25 @@ The :term:`Ceph Manager` daemon (ceph-mgr) runs alongside monitor daemons,
to provide additional monitoring and interfaces to external monitoring
and management systems.

Since the 12.x (*luminous*) Ceph release, the ceph-mgr daemon is required for
normal operations. The ceph-mgr daemon is an optional component in
the 11.x (*kraken*) Ceph release.

By default, the manager daemon requires no additional configuration, beyond
ensuring it is running. If there is no mgr daemon running, you will
see a health warning to that effect, and some of the other information
in the output of `ceph status` will be missing or stale until a mgr is started.

Use your normal deployment tools, such as ceph-ansible or ceph-deploy, to
set up ceph-mgr daemons on each of your mon nodes. It is not mandatory
to place mgr daemons on the same nodes as mons, but it is almost always
sensible.

.. toctree::
    :maxdepth: 1

    Installation and Configuration <administrator>
    Dashboard <dashboard>
    RESTful <restful>
    Writing plugins <plugins>
25
ceph/doc/mgr/restful.rst
Normal file
@ -0,0 +1,25 @@
restful plugin
==============

The RESTful plugin offers REST API access to the status of the cluster. The
plugin enables you to secure the API endpoints via SSL. If you don't have a
security certificate and key already, you need to create them first::

    openssl req -new -nodes -x509 \
      -subj "/O=IT/CN=ceph-mgr-restful" \
      -days 3650 -keyout $PKEY -out $CERT -extensions v3_ca

where ``$PKEY`` and ``$CERT`` are the paths to the private key and the
certificate. Then you need to import the keystore to the cluster using the
configuration key facility, so the RESTful plugin can read them at startup::

    ceph config-key put mgr/restful/$name/crt -i $CERT
    ceph config-key put mgr/restful/$name/key -i $PKEY

Also, like other web applications, the RESTful plugin binds to a hostname and
a port::

    ceph config-key put mgr/restful/$name/server_addr $IP
    ceph config-key put mgr/restful/$name/server_port $PORT

If not specified, the plugin uses ``127.0.0.1:8003`` by default.
@ -192,7 +192,7 @@ The following settings provide limits on the size of filestore queue.

:Description: Defines the maximum number of in progress operations the file store accepts before blocking on queuing new operations.
:Type: Integer
:Required: No. Minimal impact on performance.
:Default: ``500``
:Default: ``50``


``filestore queue max bytes``
@ -203,20 +203,6 @@ The following settings provide limits on the size of filestore queue.
:Default: ``100 << 20``


``filestore queue committing max ops``

:Description: The maximum number of operations the filestore can commit.
:Type: Integer
:Required: No
:Default: ``500``


``filestore queue committing max bytes``

:Description: The maximum number of bytes the filestore can commit.
:Type: Integer
:Required: No
:Default: ``100 << 20``


.. index:: filestore; timeouts
@ -106,6 +106,11 @@ A consensus requires a majority of monitors running to establish a quorum for
consensus about the cluster map (e.g., 1; 2 out of 3; 3 out of 5; 4 out of 6;
etc.).

``mon force quorum join``

:Description: Force monitor to join quorum even if it has been previously removed from the map
:Type: Boolean
:Default: ``False``

.. index:: Ceph Monitor; consistency
@ -314,6 +319,126 @@ by setting it in the ``[mon]`` section of the configuration file.

:Default: ``/var/lib/ceph/mon/$cluster-$id``


``mon data size warn``

:Description: Issue a ``HEALTH_WARN`` in cluster log when the monitor's data
              store goes over 15GB.
:Type: Integer
:Default: ``15*1024*1024*1024``


``mon data avail warn``

:Description: Issue a ``HEALTH_WARN`` in cluster log when the available disk
              space of monitor's data store is lower or equal to this
              percentage.
:Type: Integer
:Default: ``30``


``mon data avail crit``

:Description: Issue a ``HEALTH_ERR`` in cluster log when the available disk
              space of monitor's data store is lower or equal to this
              percentage.
:Type: Integer
:Default: ``5``


``mon warn on cache pools without hit sets``

:Description: Issue a ``HEALTH_WARN`` in cluster log if a cache pool does not
              have the hit set type set.
              See `hit set type <../operations/pools#hit-set-type>`_ for more
              details.
:Type: Boolean
:Default: ``True``


``mon warn on crush straw calc version zero``

:Description: Issue a ``HEALTH_WARN`` in cluster log if the CRUSH's
              ``straw_calc_version`` is zero. See
              `CRUSH map tunables <../operations/crush-map#tunables>`_ for
              details.
:Type: Boolean
:Default: ``True``


``mon warn on legacy crush tunables``

:Description: Issue a ``HEALTH_WARN`` in cluster log if
              CRUSH tunables are too old (older than ``mon_min_crush_required_version``)
:Type: Boolean
:Default: ``True``


``mon crush min required version``

:Description: The minimum tunable profile version required by the cluster.
              See
              `CRUSH map tunables <../operations/crush-map#tunables>`_ for
              details.
:Type: String
:Default: ``firefly``


``mon warn on osd down out interval zero``

:Description: Issue a ``HEALTH_WARN`` in cluster log if
              ``mon osd down out interval`` is zero. Having this option set to
              zero on the leader acts much like the ``noout`` flag. It's hard
              to figure out what's going wrong with clusters without the
              ``noout`` flag set but acting like that just the same, so we
              report a warning in this case.
:Type: Boolean
:Default: ``True``


``mon cache target full warn ratio``

:Description: Position between pool's ``cache_target_full`` and
              ``target_max_object`` where we start warning
:Type: Float
:Default: ``0.66``


``mon health data update interval``

:Description: How often (in seconds) the monitor in quorum shares its health
              status with its peers. (negative number disables it)
:Type: Float
:Default: ``60``


``mon health to clog``

:Description: Enable sending health summary to cluster log periodically.
:Type: Boolean
:Default: ``True``


``mon health to clog tick interval``

:Description: How often (in seconds) the monitor sends a health summary to the
              cluster log (a non-positive number disables it). If the current
              health summary is empty or identical to the last one, the monitor
              will not send it to the cluster log.
:Type: Integer
:Default: ``3600``


``mon health to clog interval``

:Description: How often (in seconds) the monitor sends a health summary to the
              cluster log (a non-positive number disables it). The monitor will
              always send the summary to the cluster log whether or not it has
              changed.
:Type: Integer
:Default: ``60``


.. index:: Ceph Storage Cluster; capacity planning, Ceph Monitor; capacity planning

Storage Capacity
@ -546,7 +671,9 @@ Trimming requires that the placement groups are ``active + clean``.

``mon sync timeout``

:Description:
:Description: Number of seconds the monitor will wait for the next update
              message from its sync provider before it gives up and bootstraps
              again.
:Type: Double
:Default: ``30.0``

@ -560,39 +687,123 @@ Trimming requires that the placement groups are ``active + clean``.

``mon sync max payload size``

:Description: The maximum size for a sync payload.
:Description: The maximum size for a sync payload (in bytes).
:Type: 32-bit Integer
:Default: ``1045676``

``mon accept timeout``
``paxos max join drift``

:Description: Number of seconds the Leader will wait for the Requester(s) to
              accept a Paxos update. It is also used during the Paxos recovery
              phase for similar purposes.
:Description: The maximum Paxos iterations before we must first sync the
              monitor data stores. When a monitor finds that its peer is too
              far ahead of it, it will first sync with the data stores before
              moving on.
:Type: Integer
:Default: ``10``

:Type: Float
:Default: ``10.0``

``paxos stash full interval``

:Description: How often (in commits) to stash a full copy of the PaxosService state.
              Currently this setting only affects the ``mds``, ``mon``, ``auth`` and
              ``mgr`` PaxosServices.
:Type: Integer
:Default: ``25``


``paxos propose interval``

:Description: Gather updates for this time interval before proposing
              a map update.
:Type: Double
:Default: ``1.0``


``paxos min``

:Description: The minimum number of paxos states to keep around
:Type: Integer
:Default: ``500``


``paxos min wait``

:Description: The minimum amount of time to gather updates after a period of
              inactivity.
:Type: Double
:Default: ``0.05``


``paxos trim min``

:Description: Number of extra proposals tolerated before trimming
:Type: Integer
:Default: ``250``


``paxos trim max``

:Description: The maximum number of extra proposals to trim at a time
:Type: Integer
:Default: ``500``
``paxos service trim min``

:Description: The minimum number of versions to trigger a trim (0 disables it)
:Type: Integer
:Default: ``250``


``paxos service trim max``

:Description: The maximum number of versions to trim during a single proposal (0 disables it)
:Type: Integer
:Default: ``500``


``mon max log epochs``

:Description: The maximum number of log epochs to trim during a single proposal
:Type: Integer
:Default: ``500``


``mon max pgmap epochs``

:Description: The maximum number of pgmap epochs to trim during a single proposal
:Type: Integer
:Default: ``500``


``mon mds force trim to``

:Description: Force monitor to trim mdsmaps to this point (0 disables it;
              dangerous, use with care)
:Type: Integer
:Default: ``0``


``mon osd force trim to``

:Description: Force monitor to trim osdmaps to this point, even if there are
              PGs not clean at the specified epoch (0 disables it; dangerous,
              use with care)
:Type: Integer
:Default: ``0``

``mon osd cache size``

:Description: The size of the osdmap cache, so as not to rely on the underlying store's cache
:Type: Integer
:Default: ``10``


``mon election timeout``

:Description: On election proposer, maximum waiting time for all ACKs in seconds.
:Type: Float
:Default: ``5``


``mon lease``

:Description: The length (in seconds) of the lease on the monitor's versions.
@ -600,22 +811,30 @@ Trimming requires that the placement groups are ``active + clean``.

:Default: ``5``


``mon lease renew interval``

:Description: The interval (in seconds) for the Leader to renew the other
              monitor's leases.
``mon lease renew interval factor``

:Description: ``mon lease`` \* ``mon lease renew interval factor`` will be the
              interval for the Leader to renew the other monitors' leases. The
              factor should be less than ``1.0`` (with the default ``mon lease``
              of 5 seconds, the renewal interval is 5 * 0.6 = 3 seconds).
:Type: Float
:Default: ``3``
:Default: ``0.6``


``mon lease ack timeout``

:Description: The number of seconds the Leader will wait for the Providers to
              acknowledge the lease extension.
``mon lease ack timeout factor``

:Description: The Leader will wait ``mon lease`` \* ``mon lease ack timeout factor``
              for the Providers to acknowledge the lease extension.
:Type: Float
:Default: ``10.0``
:Default: ``2.0``


``mon accept timeout factor``

:Description: The Leader will wait ``mon lease`` \* ``mon accept timeout factor``
              for the Requester(s) to accept a Paxos update. It is also used
              during the Paxos recovery phase for similar purposes.
:Type: Float
:Default: ``2.0``


``mon min osdmap epochs``
@ -640,42 +859,6 @@ Trimming requires that the placement groups are ``active + clean``.



Slurp
-----

In Ceph version 0.58 and earlier, when a Paxos service drifts beyond a given
number of versions, Ceph triggers the `slurp` mechanism, which establishes a
connection with the quorum Leader and obtains every single version the Leader
has for every service that has drifted. In Ceph versions 0.59 and later, slurp
will not work, because there is a single Paxos instance for all services.

.. deprecated:: 0.58

``paxos max join drift``

:Description: The maximum Paxos iterations before we must first sync the
              monitor data stores.
:Type: Integer
:Default: ``10``


``mon slurp timeout``

:Description: The number of seconds the monitor has to recover using slurp
              before the process is aborted and the monitor bootstraps.
:Type: Double
:Default: ``10.0``


``mon slurp bytes``

:Description: Limits the slurp messages to the specified number of bytes.
:Type: 32-bit Integer
:Default: ``256 * 1024``


.. index:: Ceph Monitor; clock

Clock
@ -688,7 +871,7 @@ are not synchronized, it can lead to a number of anomalies. For example:

- Daemons ignoring received messages (e.g., timestamps outdated)
- Timeouts triggered too soon/late when a message wasn't received in time.

See `Monitor Store Synchronization`_ and `Slurp`_ for details.
See `Monitor Store Synchronization`_ for details.


.. tip:: You SHOULD install NTP on your Ceph monitor hosts to
@ -739,12 +922,19 @@ acceptable values.

``mon timecheck interval``

:Description: The time check interval (clock drift check) in seconds
              for the leader.
              for the Leader.
:Type: Float
:Default: ``300.0``


``mon timecheck skew interval``

:Description: The time check interval (clock drift check) in seconds when in
              presence of a skew in seconds for the Leader.
:Type: Float
:Default: ``30.0``


Client
------
@ -835,12 +1025,6 @@ Miscellaneous

:Type: 32-bit Integer
:Default: ``100``

``mon sync fs threshold``

:Description: Synchronize with the filesystem when writing the specified number of objects. Set it to ``0`` to disable it.
:Type: 32-bit Integer
:Default: ``5``

``mon subscribe interval``

:Description: The refresh interval (in seconds) for subscriptions. The
@ -897,6 +1081,129 @@ Miscellaneous

:Default: ``0.5``


``mon osd prime pg temp max time estimate``

:Description: Maximum estimate of time spent on each PG before we prime all PGs
              in parallel.
:Type: Float
:Default: ``0.25``


``mon osd allow primary affinity``

:Description: Allow ``primary_affinity`` to be set in the osdmap.
:Type: Boolean
:Default: ``False``


``mon osd pool ec fast read``

:Description: Whether to turn on fast read on the pool or not. It will be used as
              the default setting of newly created erasure pools if ``fast_read``
              is not specified at create time.
:Type: Boolean
:Default: ``False``


``mon mds skip sanity``

:Description: Skip safety assertions on FSMap (in case of bugs where we want to
              continue anyway). The monitor terminates if the FSMap sanity check
              fails, but we can disable that by enabling this option.
:Type: Boolean
:Default: ``False``


``mon max mdsmap epochs``

:Description: The maximum number of mdsmap epochs to trim during a single proposal.
:Type: Integer
:Default: ``500``


``mon config key max entry size``

:Description: The maximum size of a config-key entry (in bytes)
:Type: Integer
:Default: ``4096``


``mon scrub interval``

:Description: How often (in seconds) the monitor scrubs its store by comparing
              the stored checksums with the computed ones of all the stored
              keys.
:Type: Integer
:Default: ``3600*24``


``mon scrub max keys``

:Description: The maximum number of keys to scrub each time.
:Type: Integer
:Default: ``100``


``mon compact on start``

:Description: Compact the database used as Ceph Monitor store on
              ``ceph-mon`` start. A manual compaction helps to shrink the
              monitor database and improve its performance if the regular
              compaction fails to work.
:Type: Boolean
:Default: ``False``


``mon compact on bootstrap``

:Description: Compact the database used as Ceph Monitor store on
              bootstrap. Monitors start probing each other to establish
              a quorum after bootstrap. If a monitor times out before joining
              the quorum, it will start over and bootstrap itself again.
:Type: Boolean
:Default: ``False``


``mon compact on trim``

:Description: Compact a certain prefix (including paxos) when we trim its old states.
:Type: Boolean
:Default: ``True``


``mon cpu threads``

:Description: Number of threads for performing CPU intensive work on the monitor.
:Type: Integer
:Default: ``4``


``mon osd mapping pgs per chunk``

:Description: We calculate the mapping from placement group to OSDs in chunks.
              This option specifies the number of placement groups per chunk.
:Type: Integer
:Default: ``4096``


``mon osd max split count``

:Description: Largest number of PGs per "involved" OSD to let split create.
              When we increase the ``pg_num`` of a pool, the placement groups
              will be split on all OSDs serving that pool. We want to avoid
              extreme multipliers on PG splits.
:Type: Integer
:Default: ``300``


``mon session timeout``

:Description: Monitor will terminate inactive sessions that stay idle over this
              time limit.
:Type: Integer
:Default: ``300``



.. _Paxos: http://en.wikipedia.org/wiki/Paxos_(computer_science)
.. _Monitor Keyrings: ../../../dev/mon-bootstrap#secret-keys
@ -12,6 +12,13 @@ This allows for less configuration on clients and monitors. Using a DNS update c

By default clients and daemons will look for the TCP service called *ceph-mon* which is configured by the *mon_dns_srv_name* configuration directive.


``mon dns srv name``

:Description: The service name used when querying the DNS for the monitor hosts/addresses.
:Type: String
:Default: ``ceph-mon``

Example
-------
When the DNS search domain is set to *example.com* a DNS zone file might contain the following elements.
@ -73,32 +73,39 @@ or by setting the value at runtime.

OSDs Report Down OSDs
=====================

By default, a Ceph OSD Daemon must report to the Ceph Monitors that another Ceph
OSD Daemon is ``down`` three times before the Ceph Monitors acknowledge that the
reported Ceph OSD Daemon is ``down``. By default, only one
Ceph OSD Daemon is required to report another Ceph OSD Daemon ``down``. You can
change the number of Ceph OSD Daemons required to report a Ceph OSD Daemon
``down`` to a Ceph Monitor by adding an ``mon osd min down reporters`` setting
(``osd min down reporters`` prior to v0.62) under the ``[mon]`` section of your
Ceph configuration file, or by setting the value at runtime.
By default, two Ceph OSD Daemons from different hosts must report to the Ceph
Monitors that another Ceph OSD Daemon is ``down`` before the Ceph Monitors
acknowledge that the reported Ceph OSD Daemon is ``down``. But there is a chance
that all the OSDs reporting the failure are hosted in a rack with a bad switch
which has trouble connecting to another OSD. To avoid this sort of false alarm,
we consider the peers reporting a failure a proxy for a potential "subcluster"
over the overall cluster that is similarly laggy. This is clearly not true in
all cases, but will sometimes help us localize the grace correction to a subset
of the system that is unhappy. ``mon osd reporter subtree level`` is used to
group the peers into the "subcluster" by their common ancestor type in the CRUSH
map. By default, only two reports from different subtrees are required to report
another Ceph OSD Daemon ``down``. You can change the number of reporters from
unique subtrees and the common ancestor type required to report a Ceph OSD
Daemon ``down`` to a Ceph Monitor by adding ``mon osd min down reporters``
and ``mon osd reporter subtree level`` settings under the ``[mon]`` section of
your Ceph configuration file, or by setting the value at runtime.
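For example, a ``[mon]`` snippet that tightens these requirements might look
like this (the values shown are illustrative, not recommendations)::

    [mon]
    mon osd min down reporters = 3
    mon osd reporter subtree level = rack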

.. ditaa:: +---------+     +---------+
           |  OSD 1  |     | Monitor |
           +---------+     +---------+
                |               |
                | OSD 2 Is Down |
                |-------------->|
                |               |
                | OSD 2 Is Down |
                |-------------->|
                |               |
                | OSD 2 Is Down |
                |-------------->|
                |               |
                |               |----------+ Mark
                |               |          | OSD 2
                |               |<---------+ Down

.. ditaa:: +---------+     +---------+     +---------+
           |  OSD 1  |     |  OSD 2  |     | Monitor |
           +---------+     +---------+     +---------+
                |               |               |
                | OSD 3 Is Down |               |
                |---------------+-------------->|
                |               |               |
                |               |               |
                |               | OSD 3 Is Down |
                |               |-------------->|
                |               |               |
                |               |               |
                |               |               |---------+ Mark
                |               |               |         | OSD 3
                |               |               |<--------+ Down


.. index:: peering failure
@ -237,6 +244,15 @@ Monitor Settings

:Default: ``0.3``


``mon osd laggy max interval``

:Description: Maximum value of ``laggy_interval`` in laggy estimations (in seconds).
              Monitor uses an adaptive approach to evaluate the ``laggy_interval`` of
              a certain OSD. This value will be used to calculate the grace time for
              that OSD.
:Type: Integer
:Default: ``300``

``mon osd adjust heartbeat grace``

:Description: If set to ``true``, Ceph will scale based on laggy estimations.
@ -312,7 +328,16 @@ Monitor Settings
              ``down`` Ceph OSD Daemon.

:Type: 32-bit Integer
:Default: ``1``
:Default: ``2``


``mon osd reporter subtree level``

:Description: In which level of parent bucket the reporters are counted. The OSDs
              send failure reports to the monitor if they find that a peer is not
              responsive, and the monitor marks the reported OSD ``out`` and then
              ``down`` after a grace period.
:Type: String
:Default: ``host``

.. index:: OSD heartbeat
@ -340,7 +365,6 @@ OSD Settings

              that the Ceph Storage Cluster considers it ``down``.
              This setting has to be set in both the [mon] and [osd] or [global]
              section so that it is read by both the MON and OSD daemons.

:Type: 32-bit Integer
:Default: ``20``

@ -381,4 +405,3 @@ OSD Settings

:Type: 32-bit Integer
:Default: ``30``
@ -420,7 +420,7 @@ recovery operations to ensure optimal performance during recovery.

              ``osd client op priority``.

:Type: 32-bit Integer
:Default: ``10``
:Default: ``3``
:Valid Range: 1-63


@ -654,7 +654,7 @@ perform well in a degraded state.

              increased load on the cluster.

:Type: 32-bit Integer
:Default: ``15``
:Default: ``3``


``osd recovery max chunk``
@ -664,6 +664,14 @@ perform well in a degraded state.

:Default: ``8 << 20``


``osd recovery max single start``

:Description: The maximum number of recovery operations per OSD that will be
              newly started when an OSD is recovering.
:Type: 64-bit Integer Unsigned
:Default: ``1``


``osd recovery thread timeout``

:Description: The maximum time in seconds before timing out a recovery thread.
@ -679,6 +687,16 @@ perform well in a degraded state.

:Type: Boolean
:Default: ``true``


``osd recovery sleep``

:Description: Time to sleep before the next recovery operation. Increasing this
              value slows down recovery while reducing the impact on client
              operations.
:Type: Float
:Default: ``0.01``

Tiering
=======
@ -42,6 +42,86 @@ Ceph configuration file.

:Type: 32-bit Integer
:Default: ``300``

``mon pg min inactive``

:Description: Issue a ``HEALTH_ERR`` in cluster log if the number of PGs that
              stay inactive longer than ``mon_pg_stuck_threshold`` exceeds this
              setting. A non-positive number means disabled; never go into ERR.
:Type: Integer
:Default: ``1``


``mon pg warn min per osd``

:Description: Issue a ``HEALTH_WARN`` in cluster log if the average number
              of PGs per (in) OSD is under this number. (a non-positive number
              disables this)
:Type: Integer
:Default: ``30``


``mon pg warn max per osd``

:Description: Issue a ``HEALTH_WARN`` in cluster log if the average number
              of PGs per (in) OSD is above this number. (a non-positive number
              disables this)
:Type: Integer
:Default: ``300``


``mon pg warn min objects``

:Description: Do not warn if the total number of objects in the cluster is below
              this number.
:Type: Integer
:Default: ``1000``


``mon pg warn min pool objects``

:Description: Do not warn on pools whose object number is below this number.
:Type: Integer
:Default: ``1000``


``mon pg check down all threshold``

:Description: Threshold of down OSDs percentage after which we check all PGs
              for stale ones.
:Type: Float
:Default: ``0.5``


``mon pg warn max object skew``

:Description: Issue a ``HEALTH_WARN`` in cluster log if the average object number
              of a certain pool is greater than ``mon pg warn max object skew`` times
              the average object number of the whole pool. (a non-positive number
              disables this)
:Type: Float
:Default: ``10``


``mon delta reset interval``

:Description: Seconds of inactivity before we reset the pg delta to 0. We keep
              track of the delta of the used space of each pool, so, for
              example, it would be easier for us to understand the progress of
              recovery or the performance of cache tier. But if there's no
              activity reported for a certain pool, we just reset the history of
              deltas of that pool.
:Type: Integer
:Default: ``10``


``mon osd max op age``

:Description: Maximum op age before we get concerned (make it a power of 2).
              A ``HEALTH_WARN`` will be issued if a request has been blocked longer
              than this limit.
:Type: Float
:Default: ``32.0``


``osd pg bits``
@ -1257,4 +1257,4 @@ Further, as noted above, be careful running old versions of the

``ceph-osd`` daemon after reverting to legacy values as the feature
bit is not perfectly enforced.

.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: http://ceph.com/papers/weil-crush-sc06.pdf
.. _CRUSH - Controlled, Scalable, Decentralized Placement of Replicated Data: https://ceph.com/wp-content/uploads/2016/08/weil-crush-sc06.pdf
@ -6,7 +6,7 @@ The *jerasure* plugin is the most generic and flexible plugin, it is

also the default for Ceph erasure coded pools.

The *jerasure* plugin encapsulates the `Jerasure
<https://bitbucket.org/jimplank/jerasure/>`_ library. It is
<http://jerasure.org>`_ library. It is
recommended to read the *jerasure* documentation to get a better
understanding of the parameters.
@ -139,7 +139,7 @@ erasure coded pool as the ``--data-pool`` during image creation::

    rbd create --size 1G --data-pool ec_pool replicated_pool/image_name

For Cephfs, using an erasure coded pool means setting that pool in
a `file layout <../../cephfs/file-layouts>`_.
a `file layout <../../../cephfs/file-layouts>`_.


Erasure coded pool and cache tiering
@ -360,6 +360,7 @@ are often restricted to accessing a particular pool. ::

    pools in the cluster!


.. _modify-user-capabilities:

Modify User Capabilities
------------------------

@ -139,4 +139,4 @@ For example::

    ceph tell osd.0 heap stop_profiler

.. _Logging and Debugging: ../log-and-debug
.. _Google Heap Profiler: http://google-perftools.googlecode.com/svn/trunk/doc/heapprofile.html
.. _Google Heap Profiler: http://goog-perftools.sourceforge.net/doc/heap_profiler.html
@ -417,7 +417,57 @@ Possible solutions
|
||||
- Upgrade Ceph
|
||||
- Restart OSDs
|
||||
|
||||
Debugging Slow Requests
-----------------------

If you run ``ceph daemon osd.<id> dump_historic_ops`` or ``ceph daemon
osd.<id> dump_ops_in_flight``, you will see a set of operations and a list of
events each operation went through. These are briefly described below.
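
For example, to inspect recent slow operations on a given OSD, run the
following on the host where that OSD's admin socket lives (``osd.0`` here is
illustrative)::

    ceph daemon osd.0 dump_historic_ops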

Events from the Messenger layer:

- header_read: when the messenger first started reading the message off the wire
- throttled: when the messenger tried to acquire memory throttle space to read
  the message into memory
- all_read: when the messenger finished reading the message off the wire
- dispatched: when the messenger gave the message to the OSD
- initiated: this is identical to header_read; the existence of both is a
  historical oddity

Events from the OSD as it prepares operations:

- queued_for_pg: the op has been put into the queue for processing by its PG
- reached_pg: the PG has started doing the op
- waiting for \*: the op is waiting for some other work to complete before it
  can proceed (a new OSDMap; for its object target to scrub; for the PG to
  finish peering; all as specified in the message)
- started: the op has been accepted as something the OSD should actually do
  (reasons not to do it: failed security/permission checks; out-of-date local
  state; etc) and is now actually being performed
- waiting for subops from: the op has been sent to replica OSDs

Events from the FileStore:

- commit_queued_for_journal_write: the op has been given to the FileStore
- write_thread_in_journal_buffer: the op is in the journal's buffer and waiting
  to be persisted (as the next disk write)
- journaled_completion_queued: the op was journaled to disk and its callback
  was queued for invocation

Events from the OSD after the data has been handed off to the local disk:

- op_commit: the op has been committed (i.e., written to the journal) by the
  primary OSD
- op_applied: the op has been ``write()``'en to the backing FS (i.e., applied
  in memory but not flushed out to disk) on the primary
- sub_op_applied: op_applied, but for a replica's "subop"
- sub_op_committed: op_commit, but for a replica's subop (only for EC pools)
- sub_op_commit_rec/sub_op_apply_rec from <X>: the primary marks this when it
  hears about the above, but for a particular replica <X>
- commit_sent: we sent a reply back to the client (or primary OSD, for sub ops)

Many of these events are seemingly redundant, but they cross important
boundaries in the internal code (such as passing data across locks into new
threads).

Flapping OSDs
=============

@ -438,6 +438,20 @@ new user, and that quota is enabled. See ``rgw bucket default quota max objects``,
``rgw bucket default quota max size``, ``rgw user default quota max objects``, and
``rgw user default quota max size`` in `Ceph Object Gateway Config Reference`_

Quota Cache
-----------

Quota statistics are cached on each RGW instance. If there are multiple
instances, then the cache can keep quotas from being perfectly enforced, as
each instance will have a different view of quotas. The options that control
this are ``rgw bucket quota ttl``, ``rgw user quota bucket sync interval`` and
``rgw user quota sync interval``. The higher these values are, the more
efficient quota operations are, but the more out-of-sync multiple instances
will be. The lower these values are, the closer to perfect enforcement
multiple instances will achieve. If all three are 0, then quota caching is
effectively disabled, and multiple instances will have perfect quota
enforcement. See `Ceph Object Gateway Config Reference`_
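
For instance, to trade efficiency for strict enforcement by disabling the
quota cache entirely, all three options can be set to 0 (a sketch; the
section name is illustrative)::

    [client.radosgw.gateway]
    rgw bucket quota ttl = 0
    rgw user quota bucket sync interval = 0
    rgw user quota sync interval = 0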

Reading / Writing Global Quotas
-------------------------------

@ -1818,11 +1818,6 @@ Special Error Responses

:Description: User does not possess specified capability.
:Code: 404 Not Found

Special Error Responses
~~~~~~~~~~~~~~~~~~~~~~~

None.


Quotas
======

@ -67,7 +67,8 @@ In the response, ``d1e7ef3b-f841-4b7c-90b2-b7d90ca2d723`` is the key id that
can be used in any `SSE-KMS`_ request.

This newly created key is not accessible by user ``rgwcrypt-user``. This
privilege must be added with an ACL.
privilege must be added with an ACL. See `How to Set/Replace ACL`_ for more
details.

Example request (assuming that the Keystone id of ``rgwcrypt-user`` is
``906aa90bd8a946c89cdff80d0869460f``)::
133 ceph/doc/radosgw/bucketpolicy.rst (new file)
@ -0,0 +1,133 @@

===============
Bucket Policies
===============

.. versionadded:: Luminous

The Ceph Object Gateway supports a subset of the Amazon S3 policy
language applied to buckets.


Creation and Removal
====================

Bucket policies are managed through standard S3 operations rather than
radosgw-admin.

For example, one may use s3cmd to set or delete a policy thus::

    $ cat > examplepol
    {
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": ["arn:aws:iam::usfolks:user/fred"]},
        "Action": "s3:PutObjectAcl",
        "Resource": [
          "arn:aws:s3:::happybucket/*"
        ]
      }]
    }

    $ s3cmd setpolicy examplepol s3://happybucket
    $ s3cmd delpolicy s3://happybucket


Limitations
===========

Currently, we support only the following actions:

- s3:AbortMultipartUpload
- s3:CreateBucket
- s3:DeleteBucketPolicy
- s3:DeleteBucket
- s3:DeleteBucketWebsite
- s3:DeleteObject
- s3:DeleteObjectVersion
- s3:DeleteReplicationConfiguration
- s3:GetAccelerateConfiguration
- s3:GetBucketAcl
- s3:GetBucketCORS
- s3:GetBucketLocation
- s3:GetBucketLogging
- s3:GetBucketNotification
- s3:GetBucketPolicy
- s3:GetBucketRequestPayment
- s3:GetBucketTagging
- s3:GetBucketVersioning
- s3:GetBucketWebsite
- s3:GetLifecycleConfiguration
- s3:GetObjectAcl
- s3:GetObject
- s3:GetObjectTorrent
- s3:GetObjectVersionAcl
- s3:GetObjectVersion
- s3:GetObjectVersionTorrent
- s3:GetReplicationConfiguration
- s3:ListAllMyBuckets
- s3:ListBucketMultiPartUploads
- s3:ListBucket
- s3:ListBucketVersions
- s3:ListMultipartUploadParts
- s3:PutAccelerateConfiguration
- s3:PutBucketAcl
- s3:PutBucketCORS
- s3:PutBucketLogging
- s3:PutBucketNotification
- s3:PutBucketPolicy
- s3:PutBucketRequestPayment
- s3:PutBucketTagging
- s3:PutBucketVersioning
- s3:PutBucketWebsite
- s3:PutLifecycleConfiguration
- s3:PutObjectAcl
- s3:PutObject
- s3:PutObjectVersionAcl
- s3:PutReplicationConfiguration
- s3:RestoreObject

We do not yet support setting policies on users, groups, or roles.

We use the RGW ‘tenant’ identifier in place of the Amazon twelve-digit
account ID. In the future we may allow you to assign an account ID to
a tenant, but for now if you want to use policies between AWS S3 and
RGW S3 you will have to use the Amazon account ID as the tenant ID when
creating users.

Under AWS, all tenants share a single namespace. RGW gives every
tenant its own namespace of buckets. There may be an option to enable
an AWS-like 'flat' bucket namespace in future versions. At present, to
access a bucket belonging to another tenant, address it as
"tenant:bucket" in the S3 request.

In AWS, a bucket policy can grant access to another account, and that
account owner can then grant access to individual users with user
permissions. Since we do not yet support user, role, and group
permissions, account owners will currently need to grant access
directly to individual users, and granting an entire account access to
a bucket grants access to all users in that account.

Bucket policies do not yet support string interpolation.

Currently, the only condition keys we support are:

- aws:CurrentTime
- aws:EpochTime
- aws:PrincipalType
- aws:Referer
- aws:SecureTransport
- aws:SourceIp
- aws:UserAgent
- aws:username
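
For instance, a statement could be limited to requests from a particular
network by adding a ``Condition`` block such as the following sketch (the
address range is illustrative)::

    "Condition": {"IpAddress": {"aws:SourceIp": "192.0.2.0/24"}}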

More may be supported soon as we integrate with the recently rewritten
Authentication/Authorization subsystem.

Swift
=====

There is no way to set bucket policies under Swift, but bucket
policies that have been set govern Swift as well as S3 operations.

Swift credentials are matched against Principals specified in a policy
in a way specific to whatever backend is being used.
@ -1,595 +0,0 @@

=====================================================
 Configuring Ceph Object Gateway with Apache/FastCGI
=====================================================

Configuring a Ceph Object Gateway requires a running Ceph Storage Cluster.
Since it contains an embedded web server (civetweb), the Ceph Object Gateway
does not require an external web server, but it can be configured to use
Apache with the FastCGI module.

.. note:: CGI can pose a security risk.

The Ceph Object Gateway is a client of the Ceph Storage Cluster. As a
Ceph Storage Cluster client, it requires:

- A name for the gateway instance. We use ``gateway`` in this guide.
- A storage cluster user name with appropriate permissions in a keyring.
- Pools to store its data.
- A data directory for the gateway instance.
- An instance entry in the Ceph configuration file.
- A configuration file for the web server to interact with FastCGI.


Create a User and Keyring
=========================

Each instance must have a user name and key to communicate with a Ceph Storage
Cluster. In the following steps, we use an admin node to create a keyring.
Then, we create a client user name and key. Next, we add the
key to the Ceph Storage Cluster. Finally, we distribute the keyring to
the node containing the gateway instance.

.. topic:: Monitor Key CAPS

   When you provide CAPS to the key, you MUST provide read capability.
   However, you have the option of providing write capability for the monitor.
   This is an important choice. If you provide write capability to the key,
   the Ceph Object Gateway will have the ability to create pools automatically;
   however, it will create pools with either the default number of placement
   groups (not ideal) or the number of placement groups you specified in your
   Ceph configuration file. If you allow the Ceph Object Gateway to create
   pools automatically, ensure that you have reasonable defaults for the number
   of placement groups first. See `Pool Configuration`_ for details.


See `User Management`_ for additional details on Ceph authentication.

#. Generate a Ceph Object Gateway user name and key for each instance. For
   exemplary purposes, we will use the name ``gateway`` after ``client.radosgw``::

    sudo ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rwx' -o /etc/ceph/ceph.client.radosgw.keyring

#. Distribute the keyring to the node with the gateway instance. ::

    sudo scp /etc/ceph/ceph.client.radosgw.keyring ceph@{hostname}:/home/ceph
    ssh {hostname}
    sudo mv ceph.client.radosgw.keyring /etc/ceph/ceph.client.radosgw.keyring


.. note:: The second step is optional if the ``admin node`` is the ``gateway host``.

Create Pools
============

Ceph Object Gateways require Ceph Storage Cluster pools to store specific
gateway data. If the user you created has permissions, the gateway
will create the pools automatically. However, you should ensure that you have
set an appropriate default number of placement groups per pool in your Ceph
configuration file.

.. note:: Ceph Object Gateways have multiple pools, so don't make the number of
   PGs too high considering all of the pools assigned to the same CRUSH
   hierarchy, or performance may suffer.

When configuring a gateway with the default region and zone, the naming
convention for pools typically omits region and zone naming, but you can use any
naming convention you prefer. For example:

- ``.rgw.root``
- ``.rgw.control``
- ``.rgw.gc``
- ``.rgw.buckets``
- ``.rgw.buckets.index``
- ``.rgw.buckets.extra``
- ``.log``
- ``.intent-log``
- ``.usage``
- ``.users``
- ``.users.email``
- ``.users.swift``
- ``.users.uid``


See `Configuration Reference - Pools`_ for details on the default pools for
gateways. See `Pools`_ for details on creating pools. As noted above, if
write permission is given, the Ceph Object Gateway will create pools
automatically. To create a pool manually, execute the following::

    ceph osd pool create {poolname} {pg-num} {pgp-num} {replicated | erasure} [{erasure-code-profile}] {ruleset-name} {ruleset-number}
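
For instance, to create the bucket data pool as a replicated pool with 128
placement groups (illustrative numbers; size them for your own cluster)::

    ceph osd pool create .rgw.buckets 128 128 replicated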

.. tip:: Ceph supports multiple CRUSH hierarchies and CRUSH rulesets, enabling
   great flexibility in the way you configure your gateway. Pools such as
   ``rgw.buckets.index`` may benefit from a pool of SSDs for fast performance.
   Backing storage may benefit from the increased economy of erasure-coded
   storage, and/or the improved performance from cache tiering.

When you have completed this step, execute the following to ensure that
you have created all of the foregoing pools::

    rados lspools


Add a Gateway Configuration to Ceph
===================================

Add the Ceph Object Gateway configuration to your Ceph configuration file on
the ``admin node``. The Ceph Object Gateway configuration requires you to
identify the Ceph Object Gateway instance. Then, you must specify the host name
where you installed the Ceph Object Gateway daemon, a keyring (for use with
cephx), the socket path for FastCGI and a log file.

For distros with Apache 2.2 and early versions of Apache 2.4 (RHEL 6, Ubuntu
12.04, 14.04 etc), append the following configuration to ``/etc/ceph/ceph.conf``
on your ``admin node``::

    [client.radosgw.gateway]
    host = {hostname}
    keyring = /etc/ceph/ceph.client.radosgw.keyring
    rgw socket path = ""
    log file = /var/log/radosgw/client.radosgw.gateway.log
    rgw frontends = fastcgi socket_port=9000 socket_host=0.0.0.0
    rgw print continue = false

.. note:: Apache 2.2 and early versions of Apache 2.4 do not use Unix Domain
   Sockets but use localhost TCP.

For distros with Apache 2.4.9 or later (RHEL 7, CentOS 7 etc), append the
following configuration to ``/etc/ceph/ceph.conf`` on your ``admin node``::

    [client.radosgw.gateway]
    host = {hostname}
    keyring = /etc/ceph/ceph.client.radosgw.keyring
    rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
    log file = /var/log/radosgw/client.radosgw.gateway.log
    rgw print continue = false

.. note:: ``Apache 2.4.9`` supports Unix Domain Sockets (UDS), but as
   ``Ubuntu 14.04`` ships with ``Apache 2.4.7`` it doesn't have UDS support and
   has to be configured for use with localhost TCP. A bug has been filed for
   backporting UDS support in ``Apache 2.4.7`` for ``Ubuntu 14.04``.
   See: `Backport support for UDS in Ubuntu Trusty`_

Here, ``{hostname}`` is the short hostname (output of the command ``hostname -s``)
of the node that is going to provide the gateway service, i.e., the
``gateway host``.

The ``[client.radosgw.gateway]`` portion of the gateway instance identifies this
portion of the Ceph configuration file as configuring a Ceph Storage Cluster
client where the client type is a Ceph Object Gateway (i.e., ``radosgw``).

.. note:: The last line in the configuration, i.e., ``rgw print continue = false``,
   is added to avoid issues with ``PUT`` operations.

Once you finish the setup procedure, if you encounter issues with your
configuration, you can add debugging to the ``[global]`` section of your Ceph
configuration file and restart the gateway to help troubleshoot any
configuration issues. For example::

    [global]
    # append the following in the global section:
    debug ms = 1
    debug rgw = 20

Distribute updated Ceph configuration file
==========================================

The updated Ceph configuration file needs to be distributed to all Ceph cluster
nodes from the ``admin node``.

It involves the following steps:

#. Pull the updated ``ceph.conf`` from ``/etc/ceph/`` to the root directory of
   the cluster on the admin node (e.g. the ``my-cluster`` directory). The
   contents of ``ceph.conf`` in ``my-cluster`` will get overwritten. To do so,
   execute the following::

    ceph-deploy --overwrite-conf config pull {hostname}

   Here, ``{hostname}`` is the short hostname of the Ceph admin node.

#. Push the updated ``ceph.conf`` file from the admin node to all other nodes in
   the cluster including the ``gateway host``::

    ceph-deploy --overwrite-conf config push [HOST] [HOST...]

   Give the hostnames of the other Ceph nodes in place of ``[HOST] [HOST...]``.


Copy ceph.client.admin.keyring from admin node to gateway host
==============================================================

As the ``gateway host`` can be a different node that is not part of the cluster,
the ``ceph.client.admin.keyring`` needs to be copied from the ``admin node`` to
the ``gateway host``. To do so, execute the following on the ``admin node``::

    sudo scp /etc/ceph/ceph.client.admin.keyring ceph@{hostname}:/home/ceph
    ssh {hostname}
    sudo mv ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring

.. note:: The above step need not be executed if the ``admin node`` is the
   ``gateway host``.

Create Data Directory
=====================

Deployment scripts may not create the default Ceph Object Gateway data
directory. Create data directories for each instance of a ``radosgw``
daemon (if you haven't done so already). The ``host`` variables in the
Ceph configuration file determine which host runs each instance of a
``radosgw`` daemon. The typical form specifies the ``radosgw`` daemon,
the cluster name and the daemon ID.

To create the directory on the ``gateway host``, execute the following::

    sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.gateway


Adjust Socket Directory Permissions
===================================

On some distros, the ``radosgw`` daemon runs as the unprivileged ``apache``
UID, and this UID must have write access to the location where it will write
its socket file.

To grant permissions to the default socket location, execute the following on
the ``gateway host``::

    sudo chown apache:apache /var/run/ceph


Change Log File Owner
=====================

On some distros, the ``radosgw`` daemon runs as the unprivileged ``apache`` UID,
but the ``root`` user owns the log file by default. You must change it to the
``apache`` user so that Apache can populate the log file. To do so, execute
the following::

    sudo chown apache:apache /var/log/radosgw/client.radosgw.gateway.log

Start radosgw service
=====================

The Ceph Object Gateway daemon needs to be started. To do so, execute the
following on the ``gateway host``:

On Debian-based distros::

    sudo /etc/init.d/radosgw start

On RPM-based distros::

    sudo /etc/init.d/ceph-radosgw start


Create a Gateway Configuration file
===================================

On the host where you installed the Ceph Object Gateway, i.e., the ``gateway host``,
create an ``rgw.conf`` file. Place the file in the ``/etc/apache2/conf-available``
directory for Debian-based distros and in the ``/etc/httpd/conf.d`` directory
for RPM-based distros. It is an Apache configuration file which is needed
for the ``radosgw`` service. This file must be readable by the web server.

Execute the following steps:

#. Create the file:

   For Debian-based distros, execute::

    sudo vi /etc/apache2/conf-available/rgw.conf

   For RPM-based distros, execute::

    sudo vi /etc/httpd/conf.d/rgw.conf

#. For distros with Apache 2.2 and early versions of Apache 2.4 that use
   localhost TCP and do not support Unix Domain Sockets, add the following
   contents to the file::

    <VirtualHost *:80>
    ServerName localhost
    DocumentRoot /var/www/html

    ErrorLog /var/log/httpd/rgw_error.log
    CustomLog /var/log/httpd/rgw_access.log combined

    # LogLevel debug

    RewriteEngine On

    RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

    SetEnv proxy-nokeepalive 1

    ProxyPass / fcgi://localhost:9000/

    </VirtualHost>

   .. note:: For Debian-based distros replace ``/var/log/httpd/``
      with ``/var/log/apache2``.

#. For distros with Apache 2.4.9 or later that support Unix Domain Sockets,
   add the following contents to the file::

    <VirtualHost *:80>
    ServerName localhost
    DocumentRoot /var/www/html

    ErrorLog /var/log/httpd/rgw_error.log
    CustomLog /var/log/httpd/rgw_access.log combined

    # LogLevel debug

    RewriteEngine On

    RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

    SetEnv proxy-nokeepalive 1

    ProxyPass / unix:///var/run/ceph/ceph.radosgw.gateway.fastcgi.sock|fcgi://localhost:9000/

    </VirtualHost>

Restart Apache
==============

The Apache service needs to be restarted to accept the new configuration.

For Debian-based distros, run::

    sudo service apache2 restart

For RPM-based distros, run::

    sudo service httpd restart

Or::

    sudo systemctl restart httpd


Using The Gateway
=================

To use the REST interfaces, first create an initial Ceph Object Gateway
user for the S3 interface. Then, create a subuser for the Swift interface.
See the `Admin Guide`_ for more details on user management.

Create a radosgw user for S3 access
-----------------------------------

A ``radosgw`` user needs to be created and granted access. The command
``man radosgw-admin`` will provide information on additional command options.

To create the user, execute the following on the ``gateway host``::

    sudo radosgw-admin user create --uid="testuser" --display-name="First User"

The output of the command will be something like the following::

    { "user_id": "testuser",
      "display_name": "First User",
      "email": "",
      "suspended": 0,
      "max_buckets": 1000,
      "auid": 0,
      "subusers": [],
      "keys": [
        { "user": "testuser",
          "access_key": "I0PJDPCIYZ665MW88W9R",
          "secret_key": "dxaXZ8U90SXydYzyS5ivamEP20hkLSUViiaR+ZDA"}],
      "swift_keys": [],
      "caps": [],
      "op_mask": "read, write, delete",
      "default_placement": "",
      "placement_tags": [],
      "bucket_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "user_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "temp_url_keys": []}

.. note:: The values of ``keys->access_key`` and ``keys->secret_key`` are
   needed for access validation.

Create a Swift user
-------------------

A Swift subuser needs to be created if this kind of access is needed. Creating
a Swift user is a two-step process. The first step is to create the user.
The second is to create the secret key.

Execute the following steps on the ``gateway host``:

Create the Swift user::

    sudo radosgw-admin subuser create --uid=testuser --subuser=testuser:swift --access=full

The output will be something like the following::

    { "user_id": "testuser",
      "display_name": "First User",
      "email": "",
      "suspended": 0,
      "max_buckets": 1000,
      "auid": 0,
      "subusers": [
        { "id": "testuser:swift",
          "permissions": "full-control"}],
      "keys": [
        { "user": "testuser:swift",
          "access_key": "3Y1LNW4Q6X0Y53A52DET",
          "secret_key": ""},
        { "user": "testuser",
          "access_key": "I0PJDPCIYZ665MW88W9R",
          "secret_key": "dxaXZ8U90SXydYzyS5ivamEP20hkLSUViiaR+ZDA"}],
      "swift_keys": [],
      "caps": [],
      "op_mask": "read, write, delete",
      "default_placement": "",
      "placement_tags": [],
      "bucket_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "user_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "temp_url_keys": []}

Create the secret key::

    sudo radosgw-admin key create --subuser=testuser:swift --key-type=swift --gen-secret

The output will be something like the following::

    { "user_id": "testuser",
      "display_name": "First User",
      "email": "",
      "suspended": 0,
      "max_buckets": 1000,
      "auid": 0,
      "subusers": [
        { "id": "testuser:swift",
          "permissions": "full-control"}],
      "keys": [
        { "user": "testuser:swift",
          "access_key": "3Y1LNW4Q6X0Y53A52DET",
          "secret_key": ""},
        { "user": "testuser",
          "access_key": "I0PJDPCIYZ665MW88W9R",
          "secret_key": "dxaXZ8U90SXydYzyS5ivamEP20hkLSUViiaR+ZDA"}],
      "swift_keys": [
        { "user": "testuser:swift",
          "secret_key": "244+fz2gSqoHwR3lYtSbIyomyPHf3i7rgSJrF\/IA"}],
      "caps": [],
      "op_mask": "read, write, delete",
      "default_placement": "",
      "placement_tags": [],
      "bucket_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "user_quota": { "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1},
      "temp_url_keys": []}

Access Verification
===================

You then need to verify that the created users are able to access the gateway.

Test S3 access
--------------

You need to write and run a Python test script to verify S3 access. The S3
access test script will connect to the ``radosgw``, create a new bucket and list
all buckets. The values for ``aws_access_key_id`` and ``aws_secret_access_key``
are taken from the values of ``access_key`` and ``secret_key`` returned by the
``radosgw-admin`` command.

Execute the following steps:

#. You will need to install the ``python-boto`` package.

   For Debian-based distros, run::

    sudo apt-get install python-boto

   For RPM-based distros, run::

    sudo yum install python-boto

#. Create the Python script::

    vi s3test.py

#. Add the following contents to the file::

    import boto
    import boto.s3.connection

    access_key = 'I0PJDPCIYZ665MW88W9R'
    secret_key = 'dxaXZ8U90SXydYzyS5ivamEP20hkLSUViiaR+ZDA'
    conn = boto.connect_s3(
        aws_access_key_id = access_key,
        aws_secret_access_key = secret_key,
        host = '{hostname}',
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )
    bucket = conn.create_bucket('my-new-bucket')
    for bucket in conn.get_all_buckets():
        print "{name}\t{created}".format(
            name = bucket.name,
            created = bucket.creation_date,
        )

   Replace ``{hostname}`` with the hostname of the host where you have
   configured the gateway service, i.e., the ``gateway host``.

#. Run the script::

    python s3test.py

   The output will be something like the following::

    my-new-bucket 2015-02-16T17:09:10.000Z

Test swift access
-----------------

Swift access can be verified via the ``swift`` command line client. The command
``man swift`` will provide more information on available command line options.

To install the ``swift`` client, execute the following:

For Debian-based distros::

    sudo apt-get install python-setuptools
    sudo easy_install pip
    sudo pip install --upgrade setuptools
    sudo pip install --upgrade python-swiftclient

For RPM-based distros::

    sudo yum install python-setuptools
    sudo easy_install pip
    sudo pip install --upgrade setuptools
    sudo pip install --upgrade python-swiftclient

To test swift access, execute the following::

    swift -A http://{IP ADDRESS}/auth/1.0 -U testuser:swift -K '{swift_secret_key}' list

Replace ``{IP ADDRESS}`` with the public IP address of the gateway server and
``{swift_secret_key}`` with its value from the output of the
``radosgw-admin key create`` command executed for the ``swift`` user.

For example::

    swift -A http://10.19.143.116/auth/1.0 -U testuser:swift -K '244+fz2gSqoHwR3lYtSbIyomyPHf3i7rgSJrF/IA' list

The output should be::

    my-new-bucket


.. _Configuration Reference - Pools: ../config-ref#pools
.. _Pool Configuration: ../../rados/configuration/pool-pg-config-ref/
.. _Pools: ../../rados/operations/pools
.. _User Management: ../../rados/operations/user-management
.. _Backport support for UDS in Ubuntu Trusty: https://bugs.launchpad.net/ubuntu/+source/apache2/+bug/1411030
.. _Admin Guide: ../admin

@ -7,6 +7,11 @@ The following settings may be added to the Ceph configuration file (i.e., usually
settings may contain default values. If you do not specify each setting in the
Ceph configuration file, the default value will be set automatically.

Configuration variables set under the ``[client.radosgw.{instance-name}]``
section will not apply to rgw or radosgw-admin commands without an instance-name
specified in the command. Thus variables meant to be applied to all RGW
instances or all radosgw-admin commands can be put into the ``[global]`` or the
``[client]`` section to avoid specifying an instance-name.
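
For example, a default quota intended to be honored both by running gateways
and by ``radosgw-admin`` could be placed in the ``[client]`` section (a
sketch; the value is illustrative)::

    [client]
    rgw bucket default quota max objects = 1024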

``rgw data``


@ -294,29 +299,13 @@ Ceph configuration file, the default value will be set automatically.
              a value of zero indicates there is no sharding. It is not
              recommended to set a value too large (e.g. a thousand) as it
              increases the cost of bucket listing.
              This variable should be set in the client or global sections
              so that it is automatically applied to radosgw-admin commands.

:Type: Integer
:Default: ``0``


``rgw num zone opstate shards``

:Description: The maximum number of shards for keeping inter-region copy
              progress information.
:Type: Integer
:Default: ``128``


``rgw opstate ratelimit sec``

:Description: The minimum time between opstate updates on a single upload.
              ``0`` disables the ratelimit.
:Type: Integer
:Default: ``30``


``rgw curl wait timeout ms``

:Description: The timeout in milliseconds for certain ``curl`` calls.

@ -352,10 +341,41 @@ Ceph configuration file, the default value will be set automatically.
:Default: ``false``


``rgw bucket quota ttl``

:Description: The amount of time in seconds that cached quota information is
              trusted. After this timeout, the quota information will be
              re-fetched from the cluster.
:Type: Integer
:Default: ``600``


``rgw user quota bucket sync interval``

:Description: The amount of time in seconds that bucket quota information is
              accumulated before syncing to the cluster. During this time,
              other RGW instances will not see the changes in bucket quota
              stats from operations on this instance.
:Type: Integer
:Default: ``180``


``rgw user quota sync interval``

:Description: The amount of time in seconds that user quota information is
              accumulated before syncing to the cluster. During this time,
              other RGW instances will not see the changes in user quota stats
              from operations on this instance.
:Type: Integer
:Default: ``180``


``rgw bucket default quota max objects``

:Description: Default max number of objects per bucket. Set on new users,
              if no other quota is specified. Has no effect on existing users.
              This variable should be set in the client or global sections
              so that it is automatically applied to radosgw-admin commands.
:Type: Integer
:Default: ``-1``
@ -385,389 +405,96 @@ Ceph configuration file, the default value will be set automatically.
:Default: ``-1``


Regions
=======
``rgw verify ssl``

In Ceph v0.67 and beyond, Ceph Object Gateway supports federated deployments and
a global namespace via the notion of regions. A region defines the geographic
location of one or more Ceph Object Gateway instances within one or more zones.
:Description: Verify SSL certificates while making requests.
:Type: Boolean
:Default: ``true``

Configuring regions differs from typical configuration procedures, because not
all of the settings end up in a Ceph configuration file. In Ceph v0.67 and
beyond, you can list regions, get a region configuration and set a region
configuration.
Multisite Settings
==================


List Regions
------------

A Ceph cluster contains a list of regions. To list the regions, execute::

    sudo radosgw-admin region list

The ``radosgw-admin`` command returns a JSON formatted list of regions.

.. code-block:: javascript

    { "default_info": { "default_region": "default"},
      "regions": [
        "default"]}


Get a Region Map
----------------

To list the details of each region, execute::

    sudo radosgw-admin region-map get

.. note:: If you receive a ``failed to read region map`` error, run
   ``sudo radosgw-admin region-map update`` first.


Get a Region
------------

To view the configuration of a region, execute::

    radosgw-admin region get [--rgw-region=<region>]

The ``default`` region looks like this:

.. code-block:: javascript

    {"name": "default",
     "api_name": "",
     "is_master": "true",
     "endpoints": [],
     "hostnames": [],
     "master_zone": "",
     "zones": [
       {"name": "default",
        "endpoints": [],
        "log_meta": "false",
        "log_data": "false"}
     ],
     "placement_targets": [
       {"name": "default-placement",
        "tags": [] }],
     "default_placement": "default-placement"}

Set a Region
------------

Defining a region consists of creating a JSON object, specifying at least the
required settings:

#. ``name``: The name of the region. Required.

#. ``api_name``: The API name for the region. Optional.

#. ``is_master``: Determines whether the region is the master region. Required.
   **Note:** You can only have one master region.

#. ``endpoints``: A list of all the endpoints in the region. For example,
   you may use multiple domain names to refer to the same region. Remember to
   escape the forward slashes (``\/``). You may also specify a
   port (``fqdn:port``) for each endpoint. Optional.

#. ``hostnames``: A list of all the hostnames in the region. For example,
   you may use multiple domain names to refer to the same region. Optional.
   The ``rgw dns name`` setting will automatically be included in this list.
   You should restart the ``radosgw`` daemon(s) after changing this setting.

#. ``master_zone``: The master zone for the region. Optional. Uses the default
   zone if not specified. **Note:** You can only have one master zone per
   region.

#. ``zones``: A list of all zones within the region. Each zone has a
   name (required), a list of endpoints (optional), and whether or not the
   gateway will log metadata and data operations (false by default).

#. ``placement_targets``: A list of placement targets (optional). Each
   placement target contains a name (required) for the placement target
   and a list of tags (optional) so that only users with the tag can use
   the placement target (i.e., the user's ``placement_tags`` field in the
   user info).

#. ``default_placement``: The default placement target for the object
   index and object data. Set to ``default-placement`` by default. You
   may also set a per-user default placement in the user info for each
   user.

To set a region, create a JSON object consisting of the required fields, save
the object to a file (e.g., ``region.json``); then, execute the following
command::

    sudo radosgw-admin region set --infile region.json

Where ``region.json`` is the JSON file you created.
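
A minimal ``region.json`` for a two-zone region might look like the following
sketch (the region and zone names follow the ``us-east``/``us-west`` example
used elsewhere in these docs; the endpoints are illustrative)::

    { "name": "us",
      "api_name": "us",
      "is_master": "true",
      "endpoints": ["http:\/\/rgw1:80\/"],
      "master_zone": "us-east",
      "zones": [
        { "name": "us-east", "endpoints": ["http:\/\/rgw1:80\/"]},
        { "name": "us-west", "endpoints": ["http:\/\/rgw2:80\/"]}],
      "placement_targets": [
        { "name": "default-placement", "tags": []}],
      "default_placement": "default-placement"}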

.. important:: The ``default`` region ``is_master`` setting is ``true`` by
   default. If you create a new region and want to make it the master region,
   you must either set the ``default`` region ``is_master`` setting to
   ``false``, or delete the ``default`` region.

Finally, update the map. ::

    sudo radosgw-admin region-map update


Set a Region Map
----------------

Setting a region map consists of creating a JSON object consisting of one or more
regions, and setting the ``master_region`` for the cluster. Each region in the
region map consists of a key/value pair, where the ``key`` setting is equivalent to
the ``name`` setting for an individual region configuration, and the ``val`` is
a JSON object consisting of an individual region configuration.

You may only have one region with ``is_master`` equal to ``true``, and it must be
specified as the ``master_region`` at the end of the region map. The following
JSON object is an example of a default region map.

.. code-block:: javascript

    { "regions": [
        { "key": "default",
          "val": { "name": "default",
            "api_name": "",
            "is_master": "true",
            "endpoints": [],
            "hostnames": [],
            "master_zone": "",
            "zones": [
              { "name": "default",
                "endpoints": [],
                "log_meta": "false",
                "log_data": "false"}],
            "placement_targets": [
              { "name": "default-placement",
                "tags": []}],
            "default_placement": "default-placement"
          }
        }
      ],
      "master_region": "default"
    }

To set a region map, execute the following::

    sudo radosgw-admin region-map set --infile regionmap.json

Where ``regionmap.json`` is the JSON file you created. Ensure that you have
zones created for the ones specified in the region map. Finally, update the
map. ::

    sudo radosgw-admin regionmap update


Zones
=====

In Ceph v0.67 and beyond, Ceph Object Gateway supports the notion of zones. A
zone defines a logical group consisting of one or more Ceph Object Gateway
instances.

Configuring zones differs from typical configuration procedures, because not
all of the settings end up in a Ceph configuration file. In Ceph v0.67 and
beyond, you can list zones, get a zone configuration and set a zone
configuration.


List Zones
----------

To list the zones in a cluster, execute::

    sudo radosgw-admin zone list


Get a Zone
----------

To get the configuration of a zone, execute::

    sudo radosgw-admin zone get [--rgw-zone=<zone>]

The ``default`` zone looks like this:

.. code-block:: javascript

    { "domain_root": ".rgw",
      "control_pool": ".rgw.control",
      "gc_pool": ".rgw.gc",
      "log_pool": ".log",
      "intent_log_pool": ".intent-log",
      "usage_log_pool": ".usage",
      "user_keys_pool": ".users",
      "user_email_pool": ".users.email",
      "user_swift_pool": ".users.swift",
      "user_uid_pool": ".users.uid",
      "system_key": { "access_key": "", "secret_key": ""},
      "placement_pools": [
        { "key": "default-placement",
          "val": { "index_pool": ".rgw.buckets.index",
                   "data_pool": ".rgw.buckets"}
        }
      ]
    }


Set a Zone
----------

Configuring a zone involves specifying a series of Ceph Object Gateway pools.
For consistency, we recommend using a pool prefix that is
the same as the zone name. See `Pools`_ for details of configuring pools.

To set a zone, create a JSON object consisting of the pools, save
the object to a file (e.g., ``zone.json``); then, execute the following
command, replacing ``{zone-name}`` with the name of the zone::

    sudo radosgw-admin zone set --rgw-zone={zone-name} --infile zone.json

Where ``zone.json`` is the JSON file you created.
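
Continuing the example, a ``zone.json`` for a zone named ``us-east`` could
prefix each pool with the zone name, mirroring the ``default`` zone shown
above (a sketch; the referenced pools must already exist)::

    { "domain_root": ".us-east.rgw",
      "control_pool": ".us-east.rgw.control",
      "gc_pool": ".us-east.rgw.gc",
      "log_pool": ".us-east.log",
      "intent_log_pool": ".us-east.intent-log",
      "usage_log_pool": ".us-east.usage",
      "user_keys_pool": ".us-east.users",
      "user_email_pool": ".us-east.users.email",
      "user_swift_pool": ".us-east.users.swift",
      "user_uid_pool": ".us-east.users.uid",
      "system_key": { "access_key": "", "secret_key": ""},
      "placement_pools": [
        { "key": "default-placement",
          "val": { "index_pool": ".us-east.rgw.buckets.index",
                   "data_pool": ".us-east.rgw.buckets"}
        }
      ]
    }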

Region/Zone Settings
====================
.. versionadded:: Jewel

You may include the following settings in your Ceph configuration
file under each ``[client.radosgw.{instance-name}]`` instance.

.. versionadded:: v.67

``rgw zone``

:Description: The name of the zone for the gateway instance.
:Description: The name of the zone for the gateway instance. If no zone is
              set, a cluster-wide default can be configured with the command
              ``radosgw-admin zone default``.
:Type: String
:Default: None


.. versionadded:: v.67
``rgw zonegroup``

``rgw region``

:Description: The name of the region for the gateway instance.
:Description: The name of the zonegroup for the gateway instance. If no
              zonegroup is set, a cluster-wide default can be configured with
              the command ``radosgw-admin zonegroup default``.
:Type: String
:Default: None


.. versionadded:: v.67

``rgw default region info oid``

:Description: The OID for storing the default region. We do not recommend
              changing this setting.
``rgw realm``

:Description: The name of the realm for the gateway instance. If no realm is
              set, a cluster-wide default can be configured with the command
              ``radosgw-admin realm default``.
:Type: String
:Default: ``default.region``
:Default: None


``rgw run sync thread``

Pools
=====

Ceph zones map to a series of Ceph Storage Cluster pools.

.. topic:: Manually Created Pools vs. Generated Pools

   If you provide write capabilities to the user key for your Ceph Object
   Gateway, the gateway has the ability to create pools automatically. This
   is convenient, but the Ceph Object Storage Cluster uses the default
   values for the number of placement groups (which may not be ideal) or the
   values you specified in your Ceph configuration file. If you allow the
   Ceph Object Gateway to create pools automatically, ensure that you have
   reasonable defaults for the number of placement groups. See
   `Pool Configuration`_ for details. See `Cluster Pools`_ for details on
   creating pools.

The default pools for the Ceph Object Gateway's default zone include:

- ``.rgw``
- ``.rgw.control``
- ``.rgw.gc``
- ``.log``
- ``.intent-log``
- ``.usage``
- ``.users``
- ``.users.email``
- ``.users.swift``
- ``.users.uid``

You have significant discretion in determining how you want a zone to access
pools. You can create pools on a per zone basis, or use the same pools for
multiple zones. As a best practice, we recommend having a separate set of pools
for your master zone and your secondary zones in each region. When creating
pools for a specific zone, consider prepending the region name and zone name to
the default pool names. For example:

- ``.region1-zone1.domain.rgw``
- ``.region1-zone1.rgw.control``
- ``.region1-zone1.rgw.gc``
- ``.region1-zone1.log``
- ``.region1-zone1.intent-log``
- ``.region1-zone1.usage``
- ``.region1-zone1.users``
- ``.region1-zone1.users.email``
- ``.region1-zone1.users.swift``
- ``.region1-zone1.users.uid``
:Description: If there are other zones in the realm to sync from, spawn threads
              to handle the sync of data and metadata.
:Type: Boolean
:Default: ``true``


Ceph Object Gateways store data for the bucket index (``index_pool``) and bucket
data (``data_pool``) in placement pools. These may overlap--i.e., you may use
the same pool for the index and the data. The index pool for default
placement is ``.rgw.buckets.index`` and the data pool for default placement
is ``.rgw.buckets``. See `Zones`_ for details on specifying pools in a zone
configuration.
``rgw data log window``

:Description: The data log entries window in seconds.
:Type: Integer
:Default: ``30``


.. deprecated:: v.67
``rgw data log changes size``

``rgw cluster root pool``
:Description: The number of in-memory entries to hold for the data changes log.
:Type: Integer
:Default: ``1000``

:Description: The Ceph Storage Cluster pool to store ``radosgw`` metadata for
              this instance. Not used in Ceph version v.67 and later. Use
              ``rgw zone root pool`` instead.

``rgw data log obj prefix``

:Description: The object name prefix for the data log.
:Type: String
:Required: No
:Default: ``.rgw.root``
:Replaced By: ``rgw zone root pool``
:Default: ``data_log``


.. versionadded:: v.67
``rgw data log num shards``

``rgw region root pool``
:Description: The number of shards (objects) on which to keep the
              data changes log.

:Description: The pool for storing all region-specific information.
              Not used in Ceph version ``Jewel``.
:Type: String
:Default: ``.rgw.root``

.. versionadded:: Jewel

``rgw zonegroup root pool``

:Description: The pool for storing all zonegroup-specific information.
:Type: String
:Default: ``.rgw.root``
:Type: Integer
:Default: ``128``


.. versionadded:: v.67
``rgw md log max shards``

``rgw zone root pool``
:Description: The maximum number of shards for the metadata log.
:Type: Integer
:Default: ``64``

:Description: The pool for storing zone-specific information.
:Type: String
:Default: ``.rgw.root``
.. important:: The values of ``rgw data log num shards`` and
   ``rgw md log max shards`` should not be changed after sync has
   started.

Swift Settings

@ -970,50 +697,6 @@ Logging Settings
:Default: ``false``


``rgw data log window``

:Description: The data log entries window in seconds.
:Type: Integer
:Default: ``30``


``rgw data log changes size``

:Description: The number of in-memory entries to hold for the data changes log.
:Type: Integer
:Default: ``1000``


``rgw data log num shards``

:Description: The number of shards (objects) on which to keep the
              data changes log.

:Type: Integer
:Default: ``128``


``rgw data log obj prefix``

:Description: The object name prefix for the data log.
:Type: String
:Default: ``data_log``


``rgw replica log obj prefix``

:Description: The object name prefix for the replica log.
:Type: String
:Default: ``replica log``


``rgw md log max shards``

:Description: The maximum number of shards for the metadata log.
:Type: Integer
:Default: ``64``


Keystone Settings
=================

@ -25,7 +25,7 @@ Key Management Service
======================

This mode allows keys to be stored in a secure key management service and
retrieved on demand by the Ceph Object Gateway to service requests to encrypt
retrieved on demand by the Ceph Object Gateway to serve requests to encrypt
or decrypt data.

This is implemented in S3 according to the `Amazon SSE-KMS`_ specification.

@ -1,825 +0,0 @@
|
||||
================================
|
||||
Configuring Federated Gateways
|
||||
================================
|
||||
|
||||
.. versionadded:: 0.67 Dumpling
|
||||
|
||||
In Ceph version 0.67 Dumpling and beyond, you may configure each :term:`Ceph
|
||||
Object Gateway` to participate in a federated architecture, with multiple
|
||||
regions, and with multiple zones for a region.
|
||||
|
||||
- **Region**: A region represents a *logical* geographic area and contains one
|
||||
or more zones. A cluster with multiple regions must specify a master region.
|
||||
|
||||
- **Zone**: A zone is a *logical* grouping of one or more Ceph Object Gateway
|
||||
instance(s). A region has a master zone that processes client requests.
|
||||
|
||||
.. important:: Only write objects to the master zone in a region. You may read
|
||||
objects from secondary zones. Currently, the Gateway does not prevent you
|
||||
from writing to a secondary zone, but **DON'T DO IT**.
|
||||
|
||||
|
||||
Background
|
||||
==========
|
||||
|
||||
When you deploy a :term:`Ceph Object Store` service that spans geographical
|
||||
locales, configuring Ceph Object Gateway regions and metadata synchronization
|
||||
agents enables the service to maintain a global namespace, even though Ceph
|
||||
Object Gateway instances run in different geographic locales and potentially on
|
||||
different Ceph Storage Clusters. When you separate one or more Ceph Object
|
||||
Gateway instances within a region into separate logical containers to maintain
|
||||
an extra copy (or copies) of the data, configuring Ceph Object Gateway zones and
|
||||
data synchronization agents enables the service to maintain one or more
|
||||
copy(ies) of the master zone's data. Extra copies of the data are important for
|
||||
failover, backup and disaster recovery.
|
||||
|
||||
You may deploy a single Ceph Storage Cluster with a federated architecture if
|
||||
you have low latency network connections (this isn't recommended). You may also
|
||||
deploy one Ceph Storage Cluster per region with a separate set of pools for
|
||||
each zone (typical). You may also deploy a separate Ceph Storage Cluster for
|
||||
each zone if your requirements and resources warrant this level of redundancy.
|
||||
|
||||
|
||||
About this Guide
|
||||
================
|
||||
|
||||
In the following sections, we will demonstrate how to configure a federated
|
||||
cluster in two logical steps:
|
||||
|
||||
- **Configure a Master Region:** This section of the guide describes how to
|
||||
set up a region with multiple zones, and how to synchronize data between the
|
||||
master zone and the secondary zone(s) within the master region.
|
||||
|
||||
- **Configure a Secondary Region:** This section of the guide describes how
|
||||
to repeat the section on setting up a master region and multiple zones so
|
||||
that you have two regions with intra-zone synchronization in each region.
|
||||
Finally, you will learn how to set up a metadata synchronization agent so
|
||||
that you can maintain a global namespace for the regions in your cluster.
|
||||
|
||||
|
||||
|
||||
Configure a Master Region
=========================

This section provides an exemplary procedure for setting up a region, and two
zones within the region. The cluster will comprise two gateway daemon
instances--one per zone. This region will serve as the master region.


Naming for the Master Region
----------------------------

Before configuring the cluster, defining region, zone and instance names will
help you manage your cluster. Let's assume the region represents the United
States, and we refer to it by its standard abbreviation.

- United States: ``us``

Let's assume the zones represent the Eastern and Western United States. For
continuity, our naming convention will use ``{region name}-{zone name}`` format,
but you can use any naming convention you prefer.

- United States, East Region: ``us-east``
- United States, West Region: ``us-west``

Finally, let's assume that zones may have more than one Ceph Object Gateway
instance per zone. For continuity, our naming convention will use
``{region name}-{zone name}-{instance}`` format, but you can use any naming
convention you prefer.

- United States Region, Master Zone, Instance 1: ``us-east-1``
- United States Region, Secondary Zone, Instance 1: ``us-west-1``


Create Pools
------------

You may have a Ceph Storage Cluster for the entire region or a Ceph Storage
Cluster for each zone.

For continuity, our naming convention will use ``{region name}-{zone name}``
format prepended to the pool name, but you can use any naming convention you
prefer. For example:

- ``.us-east.rgw.root``
- ``.us-east.rgw.control``
- ``.us-east.rgw.gc``
- ``.us-east.rgw.buckets``
- ``.us-east.rgw.buckets.index``
- ``.us-east.rgw.buckets.extra``
- ``.us-east.log``
- ``.us-east.intent-log``
- ``.us-east.usage``
- ``.us-east.users``
- ``.us-east.users.email``
- ``.us-east.users.swift``
- ``.us-east.users.uid``

- ``.us-west.rgw.root``
- ``.us-west.rgw.control``
- ``.us-west.rgw.gc``
- ``.us-west.rgw.buckets``
- ``.us-west.rgw.buckets.index``
- ``.us-west.rgw.buckets.extra``
- ``.us-west.log``
- ``.us-west.intent-log``
- ``.us-west.usage``
- ``.us-west.users``
- ``.us-west.users.email``
- ``.us-west.users.swift``
- ``.us-west.users.uid``

See `Configuration Reference - Pools`_ for details on the default pools for
gateways. See `Pools`_ for details on creating pools. Execute the following
to create a pool::

    ceph osd pool create {poolname} {pg-num} {pgp-num} {replicated | erasure} [{erasure-code-profile}] {ruleset-name} {ruleset-number}
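
For example, to create the ``us-east`` bucket data pool with 128 placement
groups (an illustrative number, not a recommendation--size it for your own
cluster)::

    ceph osd pool create .us-east.rgw.buckets 128 128 replicated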

.. tip:: When adding a large number of pools, it may take some time for your
   cluster to return to an ``active + clean`` state.

.. topic:: CRUSH Maps

   When deploying a Ceph Storage Cluster for the entire region, consider
   using a CRUSH rule for the zone such that you do NOT have overlapping
   failure domains. See `CRUSH Map`_ for details.

   Ceph supports multiple CRUSH hierarchies and CRUSH rulesets, enabling
   great flexibility in the way you configure your gateway. Pools such
   as ``rgw.buckets.index`` may benefit from a modestly sized pool of SSDs
   for fast performance. Backing storage may benefit from the increased economy
   of erasure-coded storage, and/or the improved performance from cache tiering.

When you have completed this step, execute the following to ensure that
you have created all of the foregoing pools::

    rados lspools

Create a Keyring
----------------

Each instance must have a user name and key to communicate with a Ceph Storage
Cluster. In the following steps, we use an admin node to create a keyring.
Then, we create a client user name and key for each instance. Next, we add the
keys to the Ceph Storage Cluster(s). Finally, we distribute the key ring to
each node containing an instance.

#. Create a keyring. ::

    sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring
    sudo chmod +r /etc/ceph/ceph.client.radosgw.keyring

#. Generate a Ceph Object Gateway user name and key for each instance. ::

    sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-east-1 --gen-key
    sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-west-1 --gen-key

#. Add capabilities to each key. See `Configuration Reference - Pools`_ for details
   on the effect of write permissions for the monitor and creating pools. ::

    sudo ceph-authtool -n client.radosgw.us-east-1 --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring
    sudo ceph-authtool -n client.radosgw.us-west-1 --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring

#. Once you have created a keyring and key to enable the Ceph Object Gateway
   with access to the Ceph Storage Cluster, add each key as an entry to your
   Ceph Storage Cluster(s). For example::

    sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-east-1 -i /etc/ceph/ceph.client.radosgw.keyring
    sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-west-1 -i /etc/ceph/ceph.client.radosgw.keyring
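
   To confirm that a key landed in the cluster and matches your keyring, you
   can read it back (shown here for the east instance); the printed key should
   be identical to the one in ``/etc/ceph/ceph.client.radosgw.keyring``. ::

    sudo ceph auth get client.radosgw.us-east-1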

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four users **after**
   you create the master region and the secondary region.


Install Apache/FastCGI
----------------------

For each :term:`Ceph Node` that runs a :term:`Ceph Object Gateway` daemon
instance, you must install Apache, FastCGI, the Ceph Object Gateway daemon
(``radosgw``) and the Ceph Object Gateway Sync Agent (``radosgw-agent``).
See `Install Ceph Object Gateway`_ for details.
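
On Debian/Ubuntu systems, for example, the installation typically amounts to
the following (package names can vary by distribution and release, so treat
this as a sketch)::

    sudo apt-get install apache2 libapache2-mod-fastcgi radosgw radosgw-agent
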
Create Data Directories
-----------------------

Create data directories for each daemon instance on their respective
hosts. ::

    ssh {us-east-1}
    sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.us-east-1

    ssh {us-west-1}
    sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.us-west-1

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four data directories
   **after** you create the master region and the secondary region.


Create a Gateway Configuration
------------------------------

For each instance, create a Ceph Object Gateway configuration file under the
``/etc/apache2/sites-available`` directory on the host(s) where you installed
the Ceph Object Gateway daemon(s). See below for an example of a gateway
configuration, as discussed in the following text.

.. literalinclude:: rgw.conf
   :language: ini

#. Replace the ``/{path}/{socket-name}`` entry with the path to the socket and
   the socket name. For example,
   ``/var/run/ceph/client.radosgw.us-east-1.sock``. Ensure that you use the
   same path and socket name in your ``ceph.conf`` entry.

#. Replace the ``{fqdn}`` entry with the fully-qualified domain name of the
   server.

#. Replace the ``{email.address}`` entry with the email address for the
   server administrator.

#. Add a ``ServerAlias`` if you wish to use S3-style subdomains
   (of course you do).

#. Save the configuration to a file (e.g., ``rgw-us-east.conf``).

Repeat the process for the secondary zone (e.g., ``rgw-us-west.conf``).

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four gateway
   configuration files on the respective nodes **after**
   you create the master region and the secondary region.

Finally, if you enabled SSL, make sure that you set the port to your SSL port
(usually 443) and that your configuration file includes the following::

    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/apache.crt
    SSLCertificateKeyFile /etc/apache2/ssl/apache.key
    SetEnv SERVER_PORT_SECURE 443
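
If Apache's SSL module is not already enabled, enable it before using these
directives (the Debian/Ubuntu convention is shown; other platforms differ)::

    sudo a2enmod ssl
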
Enable the Configuration
------------------------

For each instance, enable the gateway configuration and disable the
default site.

#. Enable the site for the gateway configuration. ::

    sudo a2ensite {rgw-conf-filename}

#. Disable the default site. ::

    sudo a2dissite default

.. note:: Failure to disable the default site can lead to problems.


Add a FastCGI Script
--------------------

FastCGI requires a script for each Ceph Object Gateway instance to
enable the S3-compatible interface. To create the script, execute
the following procedures.

#. Go to the ``/var/www`` directory. ::

    cd /var/www

#. Open an editor with the file name ``s3gw.fcgi``. **Note:** The configuration
   file specifies this filename. ::

    sudo vim s3gw.fcgi

#. Add a shell script with ``exec`` and the path to the gateway binary,
   the path to the Ceph configuration file, and the user name (``-n``);
   use the same user name created in step 2 of `Create a Keyring`_.
   Copy the following into the editor. ::

    #!/bin/sh
    exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.{ID}

   For example::

    #!/bin/sh
    exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.us-east-1

#. Save the file.

#. Change the permissions on the file so that it is executable. ::

    sudo chmod +x s3gw.fcgi

Repeat the process for the secondary zone.

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four FastCGI scripts
   **after** you create the master region and the secondary region.


Add Instances to Ceph Config File
---------------------------------

On an admin node, add an entry for each instance in the Ceph configuration file
for your Ceph Storage Cluster(s). For example::

    [global]
    rgw region root pool = .us.rgw.root     # Deprecated in Jewel
    rgw zonegroup root pool = .us.rgw.root  # From Jewel

    [client.radosgw.us-east-1]
    rgw region = us
    rgw zone = us-east
    rgw zone root pool = .us-east.rgw.root
    keyring = /etc/ceph/ceph.client.radosgw.keyring
    rgw dns name = {hostname}
    rgw socket path = /var/run/ceph/$name.sock
    host = {host-name}

    [client.radosgw.us-west-1]
    rgw region = us
    rgw zone = us-west
    rgw zone root pool = .us-west.rgw.root
    keyring = /etc/ceph/ceph.client.radosgw.keyring
    rgw dns name = {hostname}
    rgw socket path = /var/run/ceph/$name.sock
    host = {host-name}

Then, update each :term:`Ceph Node` with the updated Ceph configuration
file. For example::

    ceph-deploy --overwrite-conf config push {node1} {node2} {nodex}

.. note:: When you use this procedure to configure the secondary region,
   replace ``us`` with ``eu`` for the name, region, pools and zones.
   You will have a total of four entries **after**
   you create the master region and the secondary region.


Create a Region
---------------

#. Configure a region infile called ``us.json`` for the ``us`` region.

   Copy the contents of the following example to a text editor. Set
   ``is_master`` to ``true``. Replace ``{fqdn}`` with the fully-qualified
   domain name of the endpoint. It will specify a master zone as ``us-east``
   and list it in the ``zones`` list along with the ``us-west`` zone.
   See `Configuration Reference - Regions`_ for details. ::

    { "name": "us",
      "api_name": "us",
      "is_master": "true",
      "endpoints": [
            "http:\/\/{fqdn}:80\/"],
      "master_zone": "us-east",
      "zones": [
            { "name": "us-east",
              "endpoints": [
                    "http:\/\/{fqdn}:80\/"],
              "log_meta": "true",
              "log_data": "true"},
            { "name": "us-west",
              "endpoints": [
                    "http:\/\/{fqdn}:80\/"],
              "log_meta": "true",
              "log_data": "true"}],
      "placement_targets": [
       {
         "name": "default-placement",
         "tags": []
       }
      ],
      "default_placement": "default-placement"}

#. Create the ``us`` region using the ``us.json`` infile you just
   created. ::

    radosgw-admin region set --infile us.json --name client.radosgw.us-east-1

#. Delete the default region (if it exists). ::

    rados -p .us.rgw.root rm region_info.default

#. Set the ``us`` region as the default region. ::

    radosgw-admin region default --rgw-region=us --name client.radosgw.us-east-1

   Only one region can be the default region for a cluster.

#. Update the region map. ::

    radosgw-admin regionmap update --name client.radosgw.us-east-1

If you use different Ceph Storage Cluster instances for regions, you should
repeat steps 2, 4 and 5 by executing them with ``--name
client.radosgw.us-west-1``. You may also export the region map from the
initial gateway instance and import it, and then update the region map.

.. note:: When you use this procedure to configure the secondary region,
   replace ``us`` with ``eu``. You will have a total of two regions **after**
   you create the master region and the secondary region.


Create Zones
------------

#. Configure a zone infile called ``us-east.json`` for the
   ``us-east`` zone.

   Copy the contents of the following example to a text editor.
   This configuration uses pool names prepended with the region name and
   zone name. See `Configuration Reference - Pools`_ for additional details on
   gateway pools. See `Configuration Reference - Zones`_ for additional
   details on zones. ::

    { "domain_root": ".us-east.domain.rgw",
      "control_pool": ".us-east.rgw.control",
      "gc_pool": ".us-east.rgw.gc",
      "log_pool": ".us-east.log",
      "intent_log_pool": ".us-east.intent-log",
      "usage_log_pool": ".us-east.usage",
      "user_keys_pool": ".us-east.users",
      "user_email_pool": ".us-east.users.email",
      "user_swift_pool": ".us-east.users.swift",
      "user_uid_pool": ".us-east.users.uid",
      "system_key": { "access_key": "", "secret_key": ""},
      "placement_pools": [
        { "key": "default-placement",
          "val": { "index_pool": ".us-east.rgw.buckets.index",
                   "data_pool": ".us-east.rgw.buckets"}
        }
      ]
    }

#. Add the ``us-east`` zone using the ``us-east.json`` infile you
   just created in both the east and west pools by specifying their respective
   user names (i.e., ``--name``). ::

    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-west-1

   Repeat step 1 to create a zone infile for ``us-west``. Then add the zone
   using the ``us-west.json`` infile in both the east and west pools by
   specifying their respective user names (i.e., ``--name``). ::

    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-west-1

#. Delete the default zone (if it exists). ::

    rados -p .us-east.rgw.root rm zone_info.default
    rados -p .us-west.rgw.root rm zone_info.default

#. Update the region map. ::

    radosgw-admin regionmap update --name client.radosgw.us-east-1

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four zones **after**
   you create the master zone and the secondary zone in each region.


Create Zone Users
-----------------

Ceph Object Gateway stores zone users in the zone pools. So you must create zone
users after configuring the zones. Copy the ``access_key`` and ``secret_key``
fields for each user so you can update your zone configuration once you complete
this step. ::

    radosgw-admin user create --uid="us-east" --display-name="Region-US Zone-East" --name client.radosgw.us-east-1 --system
    radosgw-admin user create --uid="us-west" --display-name="Region-US Zone-West" --name client.radosgw.us-west-1 --system

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four zone users
   **after** you create the master region and the secondary region and their
   zones. These users are different from the users created in `Create a
   Keyring`_.


Update Zone Configurations
--------------------------

You must update the zone configuration with zone users so that
the synchronization agents can authenticate with the zones.

#. Open your ``us-east.json`` zone configuration file and paste the contents of
   the ``access_key`` and ``secret_key`` fields from the step of creating
   zone users into the ``system_key`` field of your zone configuration
   infile. ::

    { "domain_root": ".us-east.domain.rgw",
      "control_pool": ".us-east.rgw.control",
      "gc_pool": ".us-east.rgw.gc",
      "log_pool": ".us-east.log",
      "intent_log_pool": ".us-east.intent-log",
      "usage_log_pool": ".us-east.usage",
      "user_keys_pool": ".us-east.users",
      "user_email_pool": ".us-east.users.email",
      "user_swift_pool": ".us-east.users.swift",
      "user_uid_pool": ".us-east.users.uid",
      "system_key": {
        "access_key": "{paste-access_key-here}",
        "secret_key": "{paste-secret_key-here}"
      },
      "placement_pools": [
        { "key": "default-placement",
          "val": { "index_pool": ".us-east.rgw.buckets.index",
                   "data_pool": ".us-east.rgw.buckets"}
        }
      ]
    }

#. Save the ``us-east.json`` file. Then, update your zone configuration. ::

    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-west-1

#. Repeat step 1 to update the zone infile for ``us-west``. Then, update
   your zone configuration. ::

    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-west-1

.. note:: When you use this procedure to configure the secondary region,
   replace ``us-`` with ``eu-``. You will have a total of four zones **after**
   you create the master zone and the secondary zone in each region.


Restart Services
----------------

Once you have redeployed your Ceph configuration files, we recommend restarting
your Ceph Storage Cluster(s) and Apache instances.

For Ubuntu, use the following on each :term:`Ceph Node`::

    sudo restart ceph-all

For Red Hat/CentOS, use the following::

    sudo /etc/init.d/ceph restart

To ensure that all components have reloaded their configurations, we recommend
restarting the ``apache2`` service on each gateway instance. For example::

    sudo service apache2 restart


Start Gateway Instances
-----------------------

Start up the ``radosgw`` service. ::

    sudo /etc/init.d/radosgw start

If you are running multiple instances on the same host, you must specify the
user name. ::

    sudo /etc/init.d/radosgw start --name client.radosgw.us-east-1

Open a browser and check the endpoints for each zone. A simple HTTP request
to the domain name should return the following:

.. code-block:: xml

    <ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
      <Owner>
        <ID>anonymous</ID>
        <DisplayName/>
      </Owner>
      <Buckets/>
    </ListAllMyBucketsResult>
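
You can perform the same check from the command line; the host name below is a
placeholder for your gateway's fully-qualified domain name::

    curl http://rgw-us-east.example.com/
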
Configure a Secondary Region
============================

This section provides an exemplary procedure for setting up a cluster with
multiple regions. Configuring a cluster that spans regions requires maintaining
a global namespace, so that there are no namespace clashes among object names
stored across different regions.

This section extends the procedure in `Configure a Master Region`_, but
changes the region name and modifies a few procedures. See the following
sections for details.


Naming for the Secondary Region
-------------------------------

Before configuring the cluster, defining region, zone and instance names will
help you manage your cluster. Let's assume the region represents the European
Union, and we refer to it by its standard abbreviation.

- European Union: ``eu``

Let's assume the zones represent the Eastern and Western European Union. For
continuity, our naming convention will use ``{region name}-{zone name}``
format, but you can use any naming convention you prefer.

- European Union, East Region: ``eu-east``
- European Union, West Region: ``eu-west``

Finally, let's assume that zones may have more than one Ceph Object Gateway
instance per zone. For continuity, our naming convention will use
``{region name}-{zone name}-{instance}`` format, but you can use any naming
convention you prefer.

- European Union Region, Master Zone, Instance 1: ``eu-east-1``
- European Union Region, Secondary Zone, Instance 1: ``eu-west-1``


Configuring a Secondary Region
------------------------------

Repeat the exemplary procedure of `Configure a Master Region`_
with the following differences:

#. Use `Naming for the Secondary Region`_ in lieu of `Naming for
   the Master Region`_.

#. `Create Pools`_ using ``eu`` instead of ``us``.

#. `Create a Keyring`_ and the corresponding keys using ``eu`` instead of
   ``us``. You may use the same keyring if you desire, but ensure that you
   create the keys on the Ceph Storage Cluster for that region (or region
   and zone).

#. `Install Apache/FastCGI`_.

#. `Create Data Directories`_ using ``eu`` instead of ``us``.

#. `Create a Gateway Configuration`_ using ``eu`` instead of ``us`` for
   the socket names.

#. `Enable the Configuration`_.

#. `Add a FastCGI Script`_ using ``eu`` instead of ``us`` for the user names.

#. `Add Instances to Ceph Config File`_ using ``eu`` instead of ``us`` for the
   pool names.

#. `Create a Region`_ using ``eu`` instead of ``us``. Set ``is_master`` to
   ``false``. For consistency, create the master region in the secondary region
   too. ::

    radosgw-admin region set --infile us.json --name client.radosgw.eu-east-1

#. `Create Zones`_ using ``eu`` instead of ``us``. Ensure that you update the
   user name (i.e., ``--name``) so that you create the zones in the correct
   cluster.

#. `Update Zone Configurations`_ using ``eu`` instead of ``us``.

#. Create the zones from the master region in the secondary region. ::

    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.eu-east-1
    radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.eu-west-1
    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.eu-east-1
    radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.eu-west-1

#. Create the zones from the secondary region in the master region. ::

    radosgw-admin zone set --rgw-zone=eu-east --infile eu-east.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=eu-east --infile eu-east.json --name client.radosgw.us-west-1
    radosgw-admin zone set --rgw-zone=eu-west --infile eu-west.json --name client.radosgw.us-east-1
    radosgw-admin zone set --rgw-zone=eu-west --infile eu-west.json --name client.radosgw.us-west-1

#. `Restart Services`_.

#. `Start Gateway Instances`_.


Multi-Site Data Replication
===========================

The data synchronization agent replicates the data of a master zone to a
secondary zone. The master zone of a region is the source for the secondary zone
of the region and it gets selected automatically.

.. image:: ../images/zone-sync.png

To configure the synchronization agent, retrieve the access key and secret for
the source and destination, and the destination URL and port.

You may use ``radosgw-admin zone list`` to get a list of zone names. You
may use ``radosgw-admin zone get`` to identify the key and secret for the
zone. You may refer to the gateway configuration file you created under
`Create a Gateway Configuration`_ to identify the port number.
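
For example, to inspect the ``us-east`` zone's configuration (including the
``system_key`` access key and secret you set earlier), you might run::

    radosgw-admin zone get --rgw-zone=us-east --name client.radosgw.us-east-1
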
You only need the hostname and port for a single instance (assuming all
gateway instances in a region/zone access the same Ceph Storage Cluster).
Specify these values in a configuration file
(e.g., ``cluster-data-sync.conf``), and include a ``log_file`` name.

For example:

.. code-block:: ini

    src_access_key: {source-access-key}
    src_secret_key: {source-secret-key}
    destination: https://zone-name.fqdn.com:port
    dest_access_key: {destination-access-key}
    dest_secret_key: {destination-secret-key}
    log_file: {log.filename}

A concrete example may look like this:

.. code-block:: ini

    src_access_key: DG8RE354EFPZBICHIAF0
    src_secret_key: i3U0HiRP8CXaBWrcF8bbh6CbsxGYuPPwRkixfFSb
    destination: https://us-west.storage.net:80
    dest_access_key: U60RFI6B08F32T2PD30G
    dest_secret_key: W3HuUor7Gl1Ee93pA2pq2wFk1JMQ7hTrSDecYExl
    log_file: /var/log/radosgw/radosgw-sync-us-east-west.log

To activate the data synchronization agent, open a terminal and
execute the following::

    radosgw-agent -c cluster-data-sync.conf

When the synchronization agent is running, you should see output
indicating that the agent is synchronizing shards of data. ::

    INFO:radosgw_agent.sync:Starting incremental sync
    INFO:radosgw_agent.worker:17910 is processing shard number 0
    INFO:radosgw_agent.worker:shard 0 has 0 entries after ''
    INFO:radosgw_agent.worker:finished processing shard 0
    INFO:radosgw_agent.worker:17910 is processing shard number 1
    INFO:radosgw_agent.sync:1/64 shards processed
    INFO:radosgw_agent.worker:shard 1 has 0 entries after ''
    INFO:radosgw_agent.worker:finished processing shard 1
    INFO:radosgw_agent.sync:2/64 shards processed
    ...

.. note:: You must have an agent for each source-destination pair.


Inter-Region Metadata Replication
=================================

The data synchronization agent replicates the metadata of the master zone in
the master region to a master zone in a secondary region. Metadata consists of
gateway users and buckets, but not the objects within the buckets--ensuring a
unified namespace across the cluster. The master zone of the master region is
the source for the master zone of the secondary region and it gets selected
automatically.

.. image:: ../images/region-sync.png
   :align: center

Follow the same steps in `Multi-Site Data Replication`_ by specifying the master
zone of the master region as the source zone and the master zone of the
secondary region as the secondary zone. When activating the ``radosgw-agent``,
specify ``--metadata-only`` so that it only copies metadata. For example::

    radosgw-agent -c inter-region-data-sync.conf --metadata-only

Once you have completed the foregoing procedure, you should have a cluster
consisting of a master region (``us``) and a secondary region (``eu``) where
there is a unified namespace between the two regions.


.. _CRUSH Map: ../../rados/operations/crush-map
.. _Install Ceph Object Gateway: ../../install/install-ceph-gateway
.. _Ceph configuration file: ../../rados/configuration/ceph-conf
.. _Configuration Reference - Pools: ../config-ref#pools
.. _Configuration Reference - Regions: ../config-ref#regions
.. _Configuration Reference - Zones: ../config-ref#zones
.. _Pools: ../../rados/operations/pools
.. _Simple Configuration: ../config-fcgi
@ -13,7 +13,7 @@ Ceph Storage Clusters. :term:`Ceph Object Storage` supports two interfaces:
   that is compatible with a large subset of the OpenStack Swift API.

Ceph Object Storage uses the Ceph Object Gateway daemon (``radosgw``), which is
a FastCGI module for interacting with a Ceph Storage Cluster. Since it
an HTTP server for interacting with a Ceph Storage Cluster. Since it
provides interfaces compatible with OpenStack Swift and Amazon S3, the Ceph
Object Gateway has its own user management. Ceph Object Gateway can store data
in the same Ceph Storage Cluster used to store data from Ceph Filesystem clients
@ -37,9 +37,8 @@ you may write data with one API and retrieve it with the other.
   :maxdepth: 1

   Manual Install w/Civetweb <../../install/install-ceph-gateway>
   Simple Configuration w/Apache/FastCGI <config-fcgi>
   Federated Configuration (Deprecated) <federated-config>
   Multisite Configuration <multisite>
   Configuring Pools <pools>
   Config Reference <config-ref>
   Admin Guide <admin>
   S3 API <s3>
@ -51,8 +50,8 @@ you may write data with one API and retrieve it with the other.
   Multi-tenancy <multitenancy>
   Compression <compression>
   Server-Side Encryption <encryption>
   Bucket Policy <bucketpolicy>
   Data Layout in RADOS <layout>
   Upgrade to Older Versions of Jewel <upgrade_to_jewel>
   troubleshooting
   Manpage radosgw <../../man/8/radosgw>
   Manpage radosgw-admin <../../man/8/radosgw-admin>
@ -82,12 +82,14 @@ The user ID in RGW is a string, typically the actual user name from the user
credentials and not a hashed or mapped identifier.

When accessing a user's data, the user record is loaded from an object
"<user_id>" in pool ".users.uid".
"<user_id>" in pool "default.rgw.meta" with namespace "users.uid".

Bucket names are represented directly in the pool ".rgw". Bucket record is
Bucket names are represented in the pool "default.rgw.meta" with namespace
"root". Bucket record is
loaded in order to obtain so-called marker, which serves as a bucket ID.

The object is located in pool ".rgw.buckets". Object name is "<marker>_<key>",
The object is located in pool "default.rgw.buckets.data".
Object name is "<marker>_<key>",
for example "default.7593.4_image.png", where the marker is "default.7593.4"
and the key is "image.png". Since these concatenated names are not parsed,
only passed down to RADOS, the choice of the separator is not important and
@ -110,7 +112,8 @@ Bucket and Object Listing
-------------------------

Buckets that belong to a given user are listed in an omap of an object named
"<user_id>.buckets" (for example, "foo.buckets") in pool ".users.uid".
"<user_id>.buckets" (for example, "foo.buckets") in pool "default.rgw.meta"
with namespace "users.uid".
These objects are accessed when listing buckets, when updating bucket
contents, and updating and retrieving bucket statistics (e.g. for quota).

@ -121,7 +124,7 @@ These listings are kept consistent with buckets in pool ".rgw".

Objects that belong to a given bucket are listed in a bucket index,
as discussed in sub-section 'Bucket Index' above. The default naming
for index objects is ".dir.<marker>" in pool ".rgw.buckets.index".
for index objects is ".dir.<marker>" in pool "default.rgw.buckets.index".

Footnotes
---------
@ -136,64 +139,66 @@ In Hammer, one LevelDB is used to store omap in each OSD.
exist and the 'bucket' metadata contained its information. It is possible
to encounter such buckets in old installations.

[3] In Infernalis, a pending commit exists that removes the need of prefixing
all the rgw system pools with a period, and also renames all of these pools.
See Github pull request #4944 "rgw noperiod".
[3] The pool names have been changed starting with the Infernalis release.
If you are looking at an older setup, some details may be different. In
particular there was a different pool for each of the namespaces that are
now being used inside the default.root.meta pool.

Appendix: Compendum
-------------------
Appendix: Compendium
--------------------

Known pools:

.rgw.root
  Unspecified region, zone, and global information records, one per object.

.rgw.control
<zone>.rgw.control
  notify.<N>

.rgw
  <bucket>
  .bucket.meta.<bucket>:<marker> # see put_bucket_instance_info()
<zone>.rgw.meta
  Multiple namespaces with different kinds of metadata:

  The tenant is used to disambiguate buckets, but not bucket instances.
  Example:
  namespace: root
    <bucket>
    .bucket.meta.<bucket>:<marker> # see put_bucket_instance_info()

    .bucket.meta.prodtx:test%25star:default.84099.6
    .bucket.meta.testcont:default.4126.1
    .bucket.meta.prodtx:testcont:default.84099.4
    prodtx/testcont
    prodtx/test%25star
    testcont

    The tenant is used to disambiguate buckets, but not bucket instances.
    Example::

.rgw.gc
  gc.<N>
      .bucket.meta.prodtx:test%25star:default.84099.6
      .bucket.meta.testcont:default.4126.1
      .bucket.meta.prodtx:testcont:default.84099.4
      prodtx/testcont
      prodtx/test%25star
      testcont

.users.uid
  Contains _both_ per-user information (RGWUserInfo) in "<user>" objects
  and per-user lists of buckets in omaps of "<user>.buckets" objects.
  The "<user>" may contain the tenant if non-empty, for example:
  namespace: users.uid
    Contains _both_ per-user information (RGWUserInfo) in "<user>" objects
    and per-user lists of buckets in omaps of "<user>.buckets" objects.
    The "<user>" may contain the tenant if non-empty, for example::

    prodtx$prodt
    test2.buckets
    prodtx$prodt.buckets
    test2
      prodtx$prodt
      test2.buckets
      prodtx$prodt.buckets
      test2

.users.email
  Unimportant
  namespace: users.email
    Unimportant

.users
  47UA98JSTJZ9YAN3OS3O
  It's unclear why user ID is not used to name objects in this pool.
  namespace: users.keys
    47UA98JSTJZ9YAN3OS3O

.users.swift
  test:tester
This allows radosgw to look up users by their access keys during authentication.

.rgw.buckets.index
  namespace: users.swift
    test:tester

<zone>.rgw.buckets.index
  Objects are named ".dir.<marker>", each contains a bucket index.
  If the index is sharded, each shard appends the shard index after
  the marker.

.rgw.buckets
<zone>.rgw.buckets.data
  default.7593.4__shadow_.488urDFerTYXavx4yAd-Op8mxehnvTI_1
  <marker>_<key>
@ -63,7 +63,7 @@ gateway instances, one for each Ceph storage cluster.

This guide assumes at least two Ceph storage clusters in geographically
separate locations; however, the configuration can work on the same
site. This guide also assumes four Ceph object gateway servers named
site. This guide also assumes two Ceph object gateway servers named
``rgw1`` and ``rgw2``.

A multi-site configuration requires a master zone group and a master
@ -74,58 +74,8 @@ In this guide, the ``rgw1`` host will serve as the master zone of the
master zone group; and, the ``rgw2`` host will serve as the secondary zone
of the master zone group.

Pools
=====

We recommend using the `Ceph Placement Group’s per Pool
Calculator <http://ceph.com/pgcalc/>`__ to calculate a
suitable number of placement groups for the pools the ``ceph-radosgw``
daemon will create. Set the calculated values as defaults in your Ceph
configuration file. For example:

::

    osd pool default pg num = 50
    osd pool default pgp num = 50

.. note:: Make this change to the Ceph configuration file on your
   storage cluster; then, either make a runtime change to the
   configuration so that it will use those defaults when the gateway
   instance creates the pools.

Alternatively, create the pools manually. See
`Pools <http://docs.ceph.com/docs/master/rados/operations/pools/#pools>`__
for details on creating pools.

Pool names particular to a zone follow the naming convention
``{zone-name}.pool-name``. For example, a zone named ``us-east`` will
have the following pools:

- ``.rgw.root``

- ``us-east.rgw.control``

- ``us-east.rgw.data.root``

- ``us-east.rgw.gc``

- ``us-east.rgw.log``

- ``us-east.rgw.intent-log``

- ``us-east.rgw.usage``

- ``us-east.rgw.users.keys``

- ``us-east.rgw.users.email``

- ``us-east.rgw.users.swift``

- ``us-east.rgw.users.uid``

- ``us-east.rgw.buckets.index``

- ``us-east.rgw.buckets.data``
See `Pools`_ for instructions on creating and tuning pools for Ceph
Object Storage.


Configuring a Master Zone
@ -814,8 +764,8 @@ realm. Alternatively, to change which realm is the default, execute:

    # radosgw-admin realm default --rgw-realm=movies

..note:: When the realm is default, the command line assumes
    ``--rgw-realm=<realm-name>`` as an argument.
.. note:: When the realm is default, the command line assumes
   ``--rgw-realm=<realm-name>`` as an argument.

Delete a Realm
~~~~~~~~~~~~~~
@ -1504,3 +1454,6 @@ instance.
| | keeping inter-zone group          | | |
| | synchronization progress.         | | |
+-------------------------------------+-----------------------------------+---------+-----------------------+


.. _`Pools`: ../pools
366
ceph/doc/radosgw/nfs.rst
Normal file
@ -0,0 +1,366 @@
===
NFS
===

.. versionadded:: Jewel

Ceph Object Gateway namespaces can now be exported over file-based
access protocols such as NFSv3 and NFSv4, alongside traditional HTTP access
protocols (S3 and Swift).

In particular, the Ceph Object Gateway can now be configured to
provide file-based access when embedded in the NFS-Ganesha NFS server.

librgw
======

The librgw.so shared library (Unix) provides a loadable interface to
Ceph Object Gateway services, and instantiates a full Ceph Object Gateway
instance on initialization.

In turn, librgw.so exports rgw_file, a stateful API for file-oriented
access to RGW buckets and objects. The API is general, but its design
is strongly influenced by the File System Abstraction Layer (FSAL) API
of NFS-Ganesha, for which it has been primarily designed.

A set of Python bindings is also provided.

Namespace Conventions
=====================

The implementation conforms to Amazon Web Services (AWS) hierarchical
namespace conventions which map UNIX-style path names onto S3 buckets
and objects.

The top level of the attached namespace consists of S3 buckets,
represented as NFS directories. Files and directories subordinate to
buckets are each represented as objects, following S3 prefix and
delimiter conventions, with '/' being the only supported path
delimiter [#]_.

For example, if an NFS client has mounted an RGW namespace at "/nfs",
then a file "/nfs/mybucket/www/index.html" in the NFS namespace
corresponds to an RGW object "www/index.html" in a bucket/container
"mybucket."

Although it is generally invisible to clients, the NFS namespace is
assembled through concatenation of the corresponding paths implied by
the objects in the namespace. Leaf objects, whether files or
directories, will always be materialized in an RGW object of the
corresponding key name, "<name>" if a file, "<name>/" if a directory.
Non-leaf directories (e.g., "www" above) might only be implied by
their appearance in the names of one or more leaf objects. Directories
created within NFS or directly operated on by an NFS client (e.g., via
an attribute-setting operation such as chown or chmod) always have a
leaf object representation used to store materialized attributes such
as Unix ownership and permissions.

Supported Operations
====================

The RGW NFS interface supports most operations on files and
directories, with the following restrictions:

- Links, including symlinks, are not supported
- NFS ACLs are not supported

  + Unix user and group ownership and permissions *are* supported

- Directories may not be moved/renamed

  + files may be moved between directories

- Only full, sequential *write* i/o is supported

  + i.e., write operations are constrained to be **uploads**
  + many typical i/o operations such as editing files in place will necessarily fail as they perform non-sequential stores
  + some file utilities *apparently* writing sequentially (e.g., some versions of GNU tar) may fail due to infrequent non-sequential stores
  + When mounting via NFS, sequential application i/o can generally be constrained to be written sequentially to the NFS server via a synchronous mount option (e.g. -osync in Linux)
  + NFS clients which cannot mount synchronously (e.g., MS Windows) will not be able to upload files

Security
========

The RGW NFS interface provides a hybrid security model with the
following characteristics:

- NFS protocol security is provided by the NFS-Ganesha server, as negotiated by the NFS server and clients

  + e.g., clients can be trusted (AUTH_SYS), or required to present Kerberos user credentials (RPCSEC_GSS)
  + RPCSEC_GSS wire security can be integrity only (krb5i) or integrity and privacy (encryption, krb5p)
  + various NFS-specific security and permission rules are available

    * e.g., root-squashing

- a set of RGW/S3 security credentials (unknown to NFS) is associated with each RGW NFS mount (i.e., NFS-Ganesha EXPORT)

  + all RGW object operations performed via the NFS server will be performed by the RGW user associated with the credentials stored in the export being accessed (currently only RGW and RGW LDAP credentials are supported)

    * additional RGW authentication types such as Keystone are not currently supported

Configuring an NFS-Ganesha Instance
===================================

Each NFS RGW instance is an NFS-Ganesha server instance *embedding*
a full Ceph RGW instance.

Therefore, the RGW NFS configuration includes Ceph and Ceph Object
Gateway-specific configuration in a local ceph.conf, as well as
NFS-Ganesha-specific configuration in the NFS-Ganesha config file,
ganesha.conf.

ceph.conf
---------

Required ceph.conf configuration for RGW NFS includes:

* valid [client.radosgw.{instance-name}] section
* valid values for minimal instance configuration, in particular, an installed and correct ``keyring``

Other config variables are optional; front-end-specific and front-end
selection variables (e.g., ``rgw data`` and ``rgw frontends``) are
optional and in some cases ignored.

A small number of config variables (e.g., ``rgw_namespace_expire_secs``)
are unique to RGW NFS.

ganesha.conf
------------

A strictly minimal ganesha.conf for use with RGW NFS includes one
EXPORT block with an embedded FSAL block of type RGW::

    EXPORT
    {
         Export_ID={numeric-id};
         Path = "/";
         Pseudo = "/";
         Access_Type = RW;
         SecType = "sys";
         NFS_Protocols = 4;
         Transport_Protocols = TCP;

         # optional, permit unsquashed access by client "root" user
         #Squash = No_Root_Squash;

         FSAL {
             Name = RGW;
             User_Id = {s3-user-id};
             Access_Key_Id ="{s3-access-key}";
             Secret_Access_Key = "{s3-secret}";
         }
    }

``Export_ID`` must have an integer value, e.g., "77"

``Path`` (for RGW) should be "/"

``Pseudo`` defines an NFSv4 pseudo root name (NFSv4 only)

``SecType = sys;`` allows clients to attach without Kerberos
authentication

``Squash = No_Root_Squash;`` enables the client root user to override
permissions (Unix convention). When root-squashing is enabled,
operations attempted by the root user are performed as if by the local
"nobody" (and "nogroup") user on the NFS-Ganesha server

The RGW FSAL additionally supports RGW-specific configuration
variables in the RGW config section::

    RGW {
        cluster = "{cluster name, default 'ceph'}";
        name = "client.rgw.{instance-name}";
        ceph_conf = "/opt/ceph-rgw/etc/ceph/ceph.conf";
        init_args = "-d --debug-rgw=16";
    }

``cluster`` sets a Ceph cluster name (must match the cluster being exported)

``name`` sets an RGW instance name (must match the cluster being exported)

``ceph_conf`` gives a path to a non-default ceph.conf file to use


Other useful NFS-Ganesha configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Any EXPORT block which should support NFSv3 should include version 3
in the NFS_Protocols setting. Additionally, NFSv3 is the last major
version to support the UDP transport. To enable UDP, include it in the
Transport_Protocols setting. For example::

    EXPORT {
        ...
        NFS_Protocols = 3,4;
        Transport_Protocols = UDP,TCP;
        ...
    }

One important family of options pertains to interaction with the Linux
idmapping service, which is used to normalize user and group names
across systems. Details of idmapper integration are not provided here.

With Linux NFS clients, NFS-Ganesha can be configured
to accept client-supplied numeric user and group identifiers with
NFSv4, which by default stringifies these--this may be useful in small
setups and for experimentation::

    NFSV4 {
        Allow_Numeric_Owners = true;
        Only_Numeric_Owners = true;
    }

Troubleshooting
~~~~~~~~~~~~~~~

NFS-Ganesha configuration problems are usually debugged by running the
server with debugging options, controlled by the LOG config section.

NFS-Ganesha log messages are grouped into various components, and logging
can be enabled separately for each component. Valid values for
component logging include::

    *FATAL* critical errors only
    *WARN* unusual condition
    *DEBUG* mildly verbose trace output
    *FULL_DEBUG* verbose trace output

Example::

    LOG {

        Components {
            MEMLEAKS = FATAL;
            FSAL = FATAL;
            NFSPROTO = FATAL;
            NFS_V4 = FATAL;
            EXPORT = FATAL;
            FILEHANDLE = FATAL;
            DISPATCH = FATAL;
            CACHE_INODE = FATAL;
            CACHE_INODE_LRU = FATAL;
            HASHTABLE = FATAL;
            HASHTABLE_CACHE = FATAL;
            DUPREQ = FATAL;
            INIT = DEBUG;
            MAIN = DEBUG;
            IDMAPPER = FATAL;
            NFS_READDIR = FATAL;
            NFS_V4_LOCK = FATAL;
            CONFIG = FATAL;
            CLIENTID = FATAL;
            SESSIONS = FATAL;
            PNFS = FATAL;
            RW_LOCK = FATAL;
            NLM = FATAL;
            RPC = FATAL;
            NFS_CB = FATAL;
            THREAD = FATAL;
            NFS_V4_ACL = FATAL;
            STATE = FATAL;
            FSAL_UP = FATAL;
            DBUS = FATAL;
        }
        # optional: redirect log output
        # Facility {
        #     name = FILE;
        #     destination = "/tmp/ganesha-rgw.log";
        #     enable = active;
        # }
    }

Running Multiple NFS Gateways
=============================

Each NFS-Ganesha instance acts as a full gateway endpoint, with the
limitation that currently an NFS-Ganesha instance cannot be configured
to export HTTP services. As with ordinary gateway instances, any
number of NFS-Ganesha instances can be started, exporting the same or
different resources from the cluster. This enables the clustering of
NFS-Ganesha instances. However, this does not imply high availability.

When regular gateway instances and NFS-Ganesha instances overlap the
same data resources, they will be accessible from both the standard S3
API and through the NFS-Ganesha instance as exported. You can
co-locate the NFS-Ganesha instance with a Ceph Object Gateway instance
on the same host.

RGW vs RGW NFS
==============

Exporting an NFS namespace and other RGW namespaces (e.g., S3 or Swift
via the Civetweb HTTP front-end) from the same program instance is
currently not supported.

When adding objects and buckets outside of NFS, those objects will
appear in the NFS namespace in the time set by
``rgw_nfs_namespace_expire_secs``, which defaults to 300 seconds (5 minutes).
Override the default value for ``rgw_nfs_namespace_expire_secs`` in the
Ceph configuration file to change the refresh rate.

If exporting Swift containers that do not conform to valid S3 bucket
naming requirements, set ``rgw_relaxed_s3_bucket_names`` to true in the
[client.radosgw] section of the Ceph configuration file. For example,
if a Swift container name contains underscores, it is not a valid S3
bucket name and will be rejected unless ``rgw_relaxed_s3_bucket_names``
is set to true.
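
A minimal sketch of the corresponding ceph.conf fragment (the section name
``client.radosgw.nfs-gw1`` is a placeholder for your own instance's section)::

    [client.radosgw.nfs-gw1]
    # refresh externally-created objects into the NFS view every 60s
    rgw nfs namespace expire secs = 60
    # accept Swift container names that are not valid S3 bucket names
    rgw relaxed s3 bucket names = true
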
Configuring NFSv4 clients
=========================

To access the namespace, mount the configured NFS-Ganesha export(s)
into desired locations in the local POSIX namespace. As noted, this
implementation has a few unique restrictions:

- NFS 4.1 and higher protocol flavors are preferred

  + NFSv4 OPEN and CLOSE operations are used to track upload transactions

- To upload data successfully, clients must preserve write ordering

  + on Linux and many Unix NFS clients, use the -osync mount option

Conventions for mounting NFS resources are platform-specific. The
following conventions work on Linux and some Unix platforms:

From the command line::

    mount -t nfs -o nfsvers=4,noauto,soft,proto=tcp <ganesha-host-name>:/ <mount-point>

In /etc/fstab::

    <ganesha-host-name>:/ <mount-point> nfs noauto,soft,nfsvers=4.1,sync,proto=tcp 0 0

Specify the NFS-Ganesha host name and the path to the mount point on
the client.

Configuring NFSv3 Clients
=========================

Linux clients can be configured to mount with NFSv3 by supplying
``nfsvers=3`` and ``noacl`` as mount options. To use UDP as the
transport, add ``proto=udp`` to the mount options. However, TCP is the
preferred transport::

    <ganesha-host-name>:/ <mount-point> nfs noauto,noacl,soft,nfsvers=3,sync,proto=tcp 0 0

Configure the NFS Ganesha EXPORT block Protocols setting with version
3 and the Transports setting with UDP if the mount will use version 3 with UDP.

NFSv3 Semantics
---------------

Since NFSv3 does not communicate client OPEN and CLOSE operations to
file servers, RGW NFS cannot use these operations to mark the
beginning and ending of file upload transactions. Instead, RGW NFS
starts a new upload when the first write is sent to a file at offset
0, and finalizes the upload when no new writes to the file have been
seen for a period of time, by default, 10 seconds. To change this
timeout, set an alternate value for ``rgw_nfs_write_completion_interval_s``
in the RGW section(s) of the Ceph configuration file.
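
For example, to lengthen the quiesce period to 60 seconds (the instance
section name below is a placeholder)::

    [client.radosgw.nfs-gw1]
    rgw nfs write completion interval s = 60
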
References
==========

.. [#] http://docs.aws.amazon.com/AmazonS3/latest/dev/ListingKeysHierarchy.html

55
ceph/doc/radosgw/pools.rst
Normal file
@ -0,0 +1,55 @@
=====
Pools
=====

The Ceph Object Gateway uses several pools for its various storage needs,
which are listed in the Zone object (see ``radosgw-admin zone get``). A
single zone named ``default`` is created automatically with pool names
starting with ``default.rgw.``, but a `Multisite Configuration`_ will have
multiple zones.

Tuning
======

When ``radosgw`` first tries to operate on a zone pool that does not
exist, it will create that pool with the default values from
``osd pool default pg num`` and ``osd pool default pgp num``. These defaults
are sufficient for some pools, but others (especially those listed in
``placement_pools`` for the bucket index and data) will require additional
tuning. We recommend using the `Ceph Placement Group’s per Pool
Calculator <http://ceph.com/pgcalc/>`__ to calculate a suitable number of
placement groups for these pools. See
`Pools <http://docs.ceph.com/docs/master/rados/operations/pools/#pools>`__
for details on pool creation.
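
As a sketch, if the calculator suggested 128 placement groups for the bucket
data pool of the ``default`` zone, you could pre-create that pool before
starting ``radosgw`` (the number is illustrative, not a recommendation)::

    ceph osd pool create default.rgw.buckets.data 128 128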
|
||||
Pool Namespaces
|
||||
===============
|
||||
|
||||
.. versionadded:: Luminous
|
||||
|
||||
Pool names particular to a zone follow the naming convention
|
||||
``{zone-name}.pool-name``. For example, a zone named ``us-east`` will
|
||||
have the following pools:
|
||||
|
||||
- ``.rgw.root``
|
||||
|
||||
- ``us-east.rgw.control``
|
||||
|
||||
- ``us-east.rgw.meta``
|
||||
|
||||
- ``us-east.rgw.log``
|
||||
|
||||
- ``us-east.rgw.buckets.index``
|
||||
|
||||
- ``us-east.rgw.buckets.data``
|
||||
|
||||
The zone definitions list several more pools than that, but many of those
|
||||
are consolidated through the use of rados namespaces. For example, all of
|
||||
the following pool entries use namespaces of the ``us-east.rgw.meta`` pool::
|
||||
|
||||
"user_keys_pool": "us-east.rgw.meta:users.keys",
|
||||
"user_email_pool": "us-east.rgw.meta:users.email",
|
||||
"user_swift_pool": "us-east.rgw.meta:users.swift",
|
||||
"user_uid_pool": "us-east.rgw.meta:users.uid",

.. _`Multisite Configuration`: ../multisite
@ -1,30 +0,0 @@

FastCgiExternalServer /var/www/html/s3gw.fcgi -socket /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock


<VirtualHost *:80>

    ServerName {fqdn}
    <!--Remove the comment. Add a server alias with *.{fqdn} for S3 subdomains-->
    <!--ServerAlias *.{fqdn}-->
    ServerAdmin {email.address}
    DocumentRoot /var/www/html
    RewriteEngine On
    RewriteRule ^/(.*) /s3gw.fcgi?%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

    <IfModule mod_fastcgi.c>
        <Directory /var/www/html>
            Options +ExecCGI
            AllowOverride All
            SetHandler fastcgi-script
            Order allow,deny
            Allow from all
            AuthBasicAuthoritative Off
        </Directory>
    </IfModule>

    AllowEncodedSlashes On
    ErrorLog /var/log/httpd/error.log
    CustomLog /var/log/httpd/access.log combined
    ServerSignature Off

</VirtualHost>
@ -1,29 +0,0 @@

FastCgiExternalServer /var/www/s3gw.fcgi -socket /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock

<VirtualHost *:80>

    ServerName {fqdn}
    <!--Remove the comment. Add a server alias with *.{fqdn} for S3 subdomains-->
    <!--ServerAlias *.{fqdn}-->
    ServerAdmin {email.address}
    DocumentRoot /var/www
    RewriteEngine On
    RewriteRule ^/(.*) /s3gw.fcgi?%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

    <IfModule mod_fastcgi.c>
        <Directory /var/www>
            Options +ExecCGI
            AllowOverride All
            SetHandler fastcgi-script
            Order allow,deny
            Allow from all
            AuthBasicAuthoritative Off
        </Directory>
    </IfModule>

    AllowEncodedSlashes On
    ErrorLog /var/log/apache2/error.log
    CustomLog /var/log/apache2/access.log combined
    ServerSignature Off

</VirtualHost>
@ -1,30 +0,0 @@

FastCgiExternalServer /var/www/s3gw.fcgi -socket /{path}/{socket-name}.sock


<VirtualHost *:80>

    ServerName {fqdn}
    <!--Remove the comment. Add a server alias with *.{fqdn} for S3 subdomains-->
    <!--ServerAlias *.{fqdn}-->
    ServerAdmin {email.address}
    DocumentRoot /var/www
    RewriteEngine On
    RewriteRule ^/(.*) /s3gw.fcgi?%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

    <IfModule mod_fastcgi.c>
        <Directory /var/www>
            Options +ExecCGI
            AllowOverride All
            SetHandler fastcgi-script
            Order allow,deny
            Allow from all
            AuthBasicAuthoritative Off
        </Directory>
    </IfModule>

    AllowEncodedSlashes On
    ErrorLog /var/log/apache2/error.log
    CustomLog /var/log/apache2/access.log combined
    ServerSignature Off

</VirtualHost>
@ -1,43 +0,0 @@

=================================================================
RGW upgrading to Jewel versions 10.2.0, 10.2.1, 10.2.2 and 10.2.3
=================================================================

.. versionadded:: Jewel

Upgrading :term:`Ceph Object Gateway` to the early Jewel versions (up to
and including 10.2.3) caused issues. This document describes the needed
recovery procedure.

Running mixed versions of :term:`Ceph Object Gateway` is not supported.

Backup of old configuration
===========================
::

  rados mkpool .rgw.root.backup
  rados cppool .rgw.root .rgw.root.backup

Non-default setting for ``rgw region root pool``
================================================
If an existing multisite configuration uses a non-default setting for
``rgw region root pool``, the new pool settings ``rgw zonegroup root pool``,
``rgw period root pool`` and ``rgw realm root pool`` should be set to match.
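
For example (a sketch only; substitute the actual custom pool name used
by the existing configuration for the illustrative ``.rgw.custom.root``)::

  [global]
      rgw zonegroup root pool = .rgw.custom.root
      rgw period root pool = .rgw.custom.root
      rgw realm root pool = .rgw.custom.root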

Fix configuration after upgrade
===============================
Stop all :term:`Ceph Object Gateway` daemons running in the cluster.

Run the following commands::

  $ rados rmpool .rgw.root

  $ radosgw-admin zonegroup get --rgw-zonegroup=default | sed 's/"id":.*/"id": "default",/g' | sed 's/"master_zone.*/"master_zone":"default",/g' > default-zg.json

  $ radosgw-admin zone get --zone-id=default > default-zone.json

  $ radosgw-admin realm create --rgw-realm=myrealm

  $ radosgw-admin zonegroup set --rgw-zonegroup=default --default < default-zg.json

  $ radosgw-admin zone set --rgw-zone=default --default < default-zone.json

  $ radosgw-admin period update --commit

Start all :term:`Ceph Object Gateway` daemons in the cluster.

@ -2,6 +2,823 @@

Release Notes
===============

v12.1.0 Luminous (RC)
=====================

This is the first release candidate for Luminous, the next long-term stable release.

Major Changes from Kraken
-------------------------

* When assigning a network to the public network and not to
  the cluster network, the network specification of the public
  network will be used for the cluster network as well.
  In older versions this would lead to cluster services
  being bound to 0.0.0.0:<port>, thus making the
  cluster service even more publicly available than the
  public services. When only specifying a cluster network it
  will still result in the public services binding to 0.0.0.0.
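
  For example, a ceph.conf that keeps the two networks explicitly
  separate might look like this (the addresses below are illustrative
  assumptions, not defaults)::

    [global]
        public network = 192.168.0.0/24
        cluster network = 10.0.0.0/24
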
* In previous versions, if a client sent an op to the wrong OSD, the OSD
  would reply with ENXIO. The rationale here is that the client or OSD is
  clearly buggy and we want to surface the error as clearly as possible.
  We now only send the ENXIO reply if the ``osd_enxio_on_misdirected_op``
  option is enabled (it's off by default). This means that a VM using
  librbd that previously would have gotten an EIO and gone read-only will
  now see a blocked/hung IO instead.
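
  To restore the old error-surfacing behavior, the option named in this
  note can be enabled; a sketch (placing it in the ``[osd]`` section is an
  assumption)::

    [osd]
        osd enxio on misdirected op = true
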
* The "journaler allow split entries" config setting has been removed.
|
||||
|
||||
- *librados*:

  * Some variants of the omap_get_keys and omap_get_vals librados
    functions have been deprecated in favor of omap_get_vals2 and
    omap_get_keys2. The new methods include an output argument
    indicating whether there are additional keys left to fetch.
    Previously this had to be inferred from the requested key count vs
    the number of keys returned, but this breaks with new OSD-side
    limits on the number of keys or bytes that can be returned by a
    single omap request. These limits were introduced by kraken but
    are effectively disabled by default (by setting a very large limit
    of 1 GB) because users of the newly deprecated interface cannot
    tell whether they should fetch more keys or not. In the case of
    the standalone calls in the C++ interface
    (IoCtx::get_omap_{keys,vals}), librados has been updated to loop on
    the client side to provide a correct result via multiple calls to
    the OSD. In the case of the methods used for building
    multi-operation transactions, however, client-side looping is not
    practical, and the methods have been deprecated. Note that use of
    either the IoCtx methods on older librados versions or the
    deprecated methods on any version of librados will lead to
    incomplete results if/when the new OSD limits are enabled.

  * The original librados rados_objects_list_open (C) and objects_begin
    (C++) object listing API, deprecated in Hammer, has finally been
    removed. Users of this interface must update their software to use
    either the rados_nobjects_list_open (C) and nobjects_begin (C++) API or
    the new rados_object_list_begin (C) and object_list_begin (C++) API
    before updating the client-side librados library to Luminous.

    Object enumeration (via any API) with the latest librados version
    and pre-Hammer OSDs is no longer supported. Note that no in-tree
    Ceph services rely on object enumeration via the deprecated APIs, so
    only external librados users might be affected.

    The newest (and recommended) rados_object_list_begin (C) and
    object_list_begin (C++) API is only usable on clusters with the
    SORTBITWISE flag enabled (Jewel and later). (Note that this flag is
    required to be set before upgrading beyond Jewel.)

- *CephFS*:

  * When configuring ceph-fuse mounts in /etc/fstab, a new syntax is
    available that uses "ceph.<arg>=<val>" in the options column, instead
    of putting configuration in the device column. The old-style syntax
    still works. See the documentation page "Mount CephFS in your
    file systems table" for details, and the example just below.
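
    For instance, an entry in the new style might read as follows (the
    CephX user name ``admin`` and the mount point are assumptions for
    illustration)::

      none  /mnt/mycephfs  fuse.ceph  ceph.id=admin,_netdev,defaults  0 0
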
  * CephFS clients without the 'p' flag in their authentication capability
    string will no longer be able to set quotas or any layout fields. This
    flag previously only restricted modification of the pool and namespace
    fields in layouts.
  * CephFS directory fragmentation (large directory support) is enabled
    by default on new filesystems. To enable it on existing filesystems
    use "ceph fs set <fs_name> allow_dirfrags".
  * CephFS will generate a health warning if you have fewer standby daemons
    than it thinks you wanted. By default this will be 1 if you ever had
    a standby, and 0 if you did not. You can customize this using
    ``ceph fs set <fs> standby_count_wanted <number>``. Setting it
    to zero will effectively disable the health check.
  * The "ceph mds tell ..." command has been removed. It is superseded
    by "ceph tell mds.<id> ..."

- *MGR*:

  * ceph-mgr supports a default dashboard
  * ceph-mgr introduces a new pecan-based REST API

- *RGW*:

  * RGW introduces server-side encryption of uploaded objects, with three
    options for the management of encryption keys: automatic encryption
    (only recommended for test setups), customer-provided keys similar to
    the Amazon SSE-C specification, and a key management service
    (OpenStack Barbican) similar to the Amazon SSE-KMS specification.
  * RGW's metadata search with ElasticSearch now supports end user requests
    serviced via RGW itself, and now supports custom metadata fields
  * RGW has consolidated several metadata index pools via the use of rados
    namespaces
  * RGW now supports dynamic bucket index sharding; a usage sketch follows
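
    With resharding available, a bucket's index can also be resharded
    manually; for example (a sketch, with an illustrative bucket name
    and shard count)::

      radosgw-admin bucket reshard --bucket=mybucket --num-shards=32
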
Notable Changes
---------------

* bluestore: ceph-disk: add --filestore argument, default to --bluestore (`pr#15437 <https://github.com/ceph/ceph/pull/15437>`_, Loic Dachary, Sage Weil)
* bluestore,core: os/bluestore: fix warning (`pr#15435 <https://github.com/ceph/ceph/pull/15435>`_, Sage Weil)
* bluestore,core: os/bluestore: improve mempool usage (`pr#15402 <https://github.com/ceph/ceph/pull/15402>`_, Sage Weil)
* bluestore,core: os/bluestore: write "mkfs_done" into disk only if we pass fsck() tests (`pr#15238 <https://github.com/ceph/ceph/pull/15238>`_, xie xingguo)
* bluestore,core: os: remove experimental status for BlueStore (`pr#15177 <https://github.com/ceph/ceph/pull/15177>`_, Sage Weil)
* bluestore: os/bluestore/BlockDevice: support pmem device as bluestore backend (`pr#15102 <https://github.com/ceph/ceph/pull/15102>`_, Jianpeng Ma)
* bluestore: os/bluestore: fix a typo about bleustore (`pr#15357 <https://github.com/ceph/ceph/pull/15357>`_, Dongsheng Yang)
* bluestore: os/bluestore: fix BitMapAllocator assert on out-of-bound hint value (`pr#15289 <https://github.com/ceph/ceph/pull/15289>`_, Igor Fedotov)
* bluestore: os/bluestore: fix buffers pinned by indefinitely deferred writes (`pr#15398 <https://github.com/ceph/ceph/pull/15398>`_, Sage Weil)
* bluestore: os/bluestore: fix false assert in IOContext::aio_wake (`pr#15268 <https://github.com/ceph/ceph/pull/15268>`_, Igor Fedotov)
* bluestore: os/bluestore: fix false asserts in Cache::trim_all() (`pr#15470 <https://github.com/ceph/ceph/pull/15470>`_, xie xingguo)
* bluestore: os/bluestore: fix fsck deferred_replay (`pr#15295 <https://github.com/ceph/ceph/pull/15295>`_, Sage Weil)
* bluestore: os/bluestore/KernelDevice: fix comments (`pr#15264 <https://github.com/ceph/ceph/pull/15264>`_, xie xingguo)
* bluestore: os/bluestore/KernelDevice: helpful warning when aio limit exhausted (`pr#15116 <https://github.com/ceph/ceph/pull/15116>`_, Sage Weil)
* bluestore,performance: os/bluestore: avoid overloading extents during reshard; atomic deferred_batch_ops (`pr#15502 <https://github.com/ceph/ceph/pull/15502>`_, xie xingguo)
* bluestore,performance: os/bluestore: batch throttle (`pr#15284 <https://github.com/ceph/ceph/pull/15284>`_, Jianpeng Ma)
* bluestore,performance: os/bluestore: keep statfs replica in RAM to avoid expensive KV retrieval (`pr#15309 <https://github.com/ceph/ceph/pull/15309>`_, Igor Fedotov)
* bluestore,performance: os/bluestore/KernelDevice: fix sync write vs flush (`pr#15034 <https://github.com/ceph/ceph/pull/15034>`_, Sage Weil)
* bluestore,performance: os/bluestore: move cache_trim into MempoolThread (`pr#15380 <https://github.com/ceph/ceph/pull/15380>`_, xie xingguo)
* bluestore,performance: os/bluestore: put bluefs in the middle of the shared device (`pr#14873 <https://github.com/ceph/ceph/pull/14873>`_, Sage Weil)
* bluestore,performance: os/bluestore: separate kv_sync_thread into two parts (`pr#14035 <https://github.com/ceph/ceph/pull/14035>`_, Jianpeng Ma, Igor Fedotov, Sage Weil)
* bluestore,performance: os/bluestore: try to unshare blobs for EC overwrite workload (`pr#14239 <https://github.com/ceph/ceph/pull/14239>`_, Sage Weil)
* build/ops: 12.0.3 (`pr#15600 <https://github.com/ceph/ceph/pull/15600>`_, Jenkins Build Slave User)
* build/ops: build: move bash_completion.d/ceph to ceph-common (`pr#15148 <https://github.com/ceph/ceph/pull/15148>`_, Leo Zhang)
* build/ops: build: remove ceph-disk-udev entirely (`pr#15259 <https://github.com/ceph/ceph/pull/15259>`_, Leo Zhang)
* build/ops: build: revert -Wvla from #15342 (`pr#15469 <https://github.com/ceph/ceph/pull/15469>`_, Willem Jan Withagen)
* build/ops: build: Use .S suffix for ppc64le assembly files (`issue#20106 <http://tracker.ceph.com/issues/20106>`_, `pr#15373 <https://github.com/ceph/ceph/pull/15373>`_, Andrew Solomon)
* build/ops: ceph-detect-init: detect init system by poking the system (`issue#19884 <http://tracker.ceph.com/issues/19884>`_, `pr#15043 <https://github.com/ceph/ceph/pull/15043>`_, Kefu Chai)
* build/ops,common: build: Adds C++ warning flag for C Variable-Length Arrays. (`pr#15342 <https://github.com/ceph/ceph/pull/15342>`_, Jesse Williamson)
* build/ops,common: common/blkdev.cc: propagate get_device_by_fd to different OSes (`pr#15547 <https://github.com/ceph/ceph/pull/15547>`_, Willem Jan Withagen)
* build/ops: conditionalize rgw Beast frontend so it isn't built on s390x architecture (`issue#20048 <http://tracker.ceph.com/issues/20048>`_, `pr#15225 <https://github.com/ceph/ceph/pull/15225>`_, Willem Jan Withagen, Nathan Cutler, Kefu Chai, Tim Serong, Casey Bodley)
* build/ops,core,tests: osd/dmclock/testing: reorganize testing, building now optional (`pr#15375 <https://github.com/ceph/ceph/pull/15375>`_, J. Eric Ivancich)
* build/ops: debian: ceph-mgr: fix package description (`pr#15513 <https://github.com/ceph/ceph/pull/15513>`_, Fabian Grünbichler)
* build/ops: debian: sync logrotate packaging with downstream (`issue#19938 <http://tracker.ceph.com/issues/19938>`_, `pr#15567 <https://github.com/ceph/ceph/pull/15567>`_, Fabian Grünbichler)
* build/ops: do_cmake.sh: enable ccache if installed (`pr#15274 <https://github.com/ceph/ceph/pull/15274>`_, Sage Weil)
* build/ops: drop libfcgi build dependency (`pr#15285 <https://github.com/ceph/ceph/pull/15285>`_, Nathan Cutler)
* build/ops: install-deps.sh: workaround setuptools' dependency on six (`pr#15406 <https://github.com/ceph/ceph/pull/15406>`_, Kefu Chai)
* build/ops: rpm: apply epoch only if %epoch macro is defined (`pr#15286 <https://github.com/ceph/ceph/pull/15286>`_, Nathan Cutler)
* build/ops: rpm: make librbd1 %post scriptlet depend on coreutils (`issue#20052 <http://tracker.ceph.com/issues/20052>`_, `pr#15231 <https://github.com/ceph/ceph/pull/15231>`_, Giacomo Comes, Nathan Cutler)
* build/ops: rpm: move _epoch_prefix below Epoch definition (`pr#15417 <https://github.com/ceph/ceph/pull/15417>`_, Nathan Cutler)
* build/ops: rpm: move RDMA and python-prettytables build dependencies to distro-conditional section (`pr#15200 <https://github.com/ceph/ceph/pull/15200>`_, Nathan Cutler)
* build/ops: selinux: Allow read on var_run_t (`issue#16674 <http://tracker.ceph.com/issues/16674>`_, `pr#15523 <https://github.com/ceph/ceph/pull/15523>`_, Boris Ranto)
* build/ops: selinux: Do parallel relabel on package install (`issue#20077 <http://tracker.ceph.com/issues/20077>`_, `pr#14871 <https://github.com/ceph/ceph/pull/14871>`_, Boris Ranto)
* build/ops: selinux: Install ceph-base before ceph-selinux (`issue#20184 <http://tracker.ceph.com/issues/20184>`_, `pr#15490 <https://github.com/ceph/ceph/pull/15490>`_, Boris Ranto)
* build/ops: Set subman cron attributes in spec file (`issue#20074 <http://tracker.ceph.com/issues/20074>`_, `pr#15270 <https://github.com/ceph/ceph/pull/15270>`_, Thomas Serlin)
* build/ops: The Clangtastic Mr. Clocks (`pr#15186 <https://github.com/ceph/ceph/pull/15186>`_, Adam C. Emerson)
* build/ops: yasm-wrapper: filter -pthread (`pr#15249 <https://github.com/ceph/ceph/pull/15249>`_, Alessandro Barbieri)
* cephfs: #17980: MDS client blacklisting and blacklist on eviction (`issue#17980 <http://tracker.ceph.com/issues/17980>`_, `issue#9754 <http://tracker.ceph.com/issues/9754>`_, `pr#14610 <https://github.com/ceph/ceph/pull/14610>`_, John Spray)
* cephfs: ceph: simplify CInode::maybe_export_pin() (`pr#15106 <https://github.com/ceph/ceph/pull/15106>`_, "Yan, Zheng")
* cephfs: client: fix display ino in the ldout (`pr#15314 <https://github.com/ceph/ceph/pull/15314>`_, huanwen ren)
* cephfs: client/inode: fix the dump type of Inode::dump() (`pr#15198 <https://github.com/ceph/ceph/pull/15198>`_, huanwen ren)
* cephfs,common,rbd: blkin: librbd trace hooks (`pr#15053 <https://github.com/ceph/ceph/pull/15053>`_, Victor Araujo, Jason Dillaman)
* cephfs: mon/FSCommand: fix indentation (`pr#15423 <https://github.com/ceph/ceph/pull/15423>`_, Sage Weil)
* cephfs: mon/MDSMonitor: respect mds_standby_for_rank config (`pr#15129 <https://github.com/ceph/ceph/pull/15129>`_, "Yan, Zheng")
* cephfs: osdc/Journaler: avoid executing on_safe contexts prematurely (`issue#20055 <http://tracker.ceph.com/issues/20055>`_, `pr#15240 <https://github.com/ceph/ceph/pull/15240>`_, "Yan, Zheng")
* cephfs: qa/cephfs: disable mds_bal_frag for TestStrays.test_purge_queue_op_rate (`issue#19892 <http://tracker.ceph.com/issues/19892>`_, `pr#15105 <https://github.com/ceph/ceph/pull/15105>`_, "Yan, Zheng")
* cephfs: qa/tasks/cephfs: use getattr to guarantee inode is in client cache (`issue#19912 <http://tracker.ceph.com/issues/19912>`_, `pr#15062 <https://github.com/ceph/ceph/pull/15062>`_, "Yan, Zheng")
* cephfs: qa: update log whitelists for kcephfs suite (`pr#14922 <https://github.com/ceph/ceph/pull/14922>`_, "Yan, Zheng")
* cephfs,tests: qa: fix float parse error in test_fragment (`pr#15122 <https://github.com/ceph/ceph/pull/15122>`_, Patrick Donnelly)
* cephfs,tests: qa: silence upgrade test failure (`issue#19934 <http://tracker.ceph.com/issues/19934>`_, `pr#15126 <https://github.com/ceph/ceph/pull/15126>`_, Patrick Donnelly)
* cephfs,tests: qa: simplify TestJournalRepair (`pr#15096 <https://github.com/ceph/ceph/pull/15096>`_, John Spray)
* cleanup: src: put-to operator function - const input cleanup (`issue#3977 <http://tracker.ceph.com/issues/3977>`_, `pr#15364 <https://github.com/ceph/ceph/pull/15364>`_, Jos Collin)
* cmake: Add -finstrument-functions flag to OSD code (`pr#15055 <https://github.com/ceph/ceph/pull/15055>`_, Mohamad Gebai)
* cmake: check the existence of gperf before using it (`pr#15164 <https://github.com/ceph/ceph/pull/15164>`_, Kefu Chai)
* cmake: do not link libcommon against some libs (`pr#15340 <https://github.com/ceph/ceph/pull/15340>`_, Willem Jan Withagen)
* cmake: fix boost components for WITH_SYSTEM_BOOST (`pr#15160 <https://github.com/ceph/ceph/pull/15160>`_, Bassam Tabbara)
* cmake: improved build speed by 5x when using ccache (`pr#15147 <https://github.com/ceph/ceph/pull/15147>`_, Bassam Tabbara)
* cmake: link against fcgi only if enabled (`pr#15425 <https://github.com/ceph/ceph/pull/15425>`_, Yao Zongyou)
* cmake: misc fixes for build on i386 (`pr#15516 <https://github.com/ceph/ceph/pull/15516>`_, James Page)
* cmake: rgw: do not link against boost in a wholesale (`pr#15347 <https://github.com/ceph/ceph/pull/15347>`_, Nathan Cutler, Kefu Chai)
* cmake: workaound ccache issue with .S assembly files (`pr#15142 <https://github.com/ceph/ceph/pull/15142>`_, Bassam Tabbara)
* common: add ceph::size() (`pr#15181 <https://github.com/ceph/ceph/pull/15181>`_, Kefu Chai)
* common: ceph_osd: remove client message cap limit (`pr#14944 <https://github.com/ceph/ceph/pull/14944>`_, Haomai Wang)
* common: cls: optimize header file dependency (`pr#15165 <https://github.com/ceph/ceph/pull/15165>`_, Brad Hubbard, Xiaowei Chen)
* common: cmdparse: more constness (`pr#15023 <https://github.com/ceph/ceph/pull/15023>`_, Kefu Chai)
* common: common/ceph_context: 'config diff get' option added (`pr#10736 <https://github.com/ceph/ceph/pull/10736>`_, Daniel Oliveira)
* common: common/ceph_context: fix leak of registered commands on exit (`pr#15302 <https://github.com/ceph/ceph/pull/15302>`_, xie xingguo)
* common: common/iso_8601.cc: Make return expression Clang compatible (`pr#15336 <https://github.com/ceph/ceph/pull/15336>`_, Willem Jan Withagen)
* common: common/LogEntry: include EntityName in log entries (`pr#15395 <https://github.com/ceph/ceph/pull/15395>`_, Sage Weil)
* common: common,osdc: remove atomic_t completely (`pr#15562 <https://github.com/ceph/ceph/pull/15562>`_, Kefu Chai)
* common: common/perf_counters: add average time for PERFCOUNTER_TIME (`pr#15478 <https://github.com/ceph/ceph/pull/15478>`_, xie xingguo)
* common: common/perf_counters: make schema more friendly and update docs (`pr#14933 <https://github.com/ceph/ceph/pull/14933>`_, Sage Weil)
* common: common,test: migrate atomic_t to std::atomic (`pr#14866 <https://github.com/ceph/ceph/pull/14866>`_, Jesse Williamson)
* common,core: ceph_test_rados_api_misc: fix LibRadosMiscConnectFailure.ConnectFailure retry (`issue#19901 <http://tracker.ceph.com/issues/19901>`_, `pr#15522 <https://github.com/ceph/ceph/pull/15522>`_, Sage Weil)
* common,core: osd/OSDMap: make osd_state 32 bits wide (`pr#15390 <https://github.com/ceph/ceph/pull/15390>`_, Sage Weil)
* common,core: osd/OSDMap: replace require_*_osds flags with a single require_osd_release field (`pr#15068 <https://github.com/ceph/ceph/pull/15068>`_, Sage Weil)
* common,core: osd/OSDMap: replace string-based min_compat_client with a CEPH_RELEASE_* uint8_t (`pr#15351 <https://github.com/ceph/ceph/pull/15351>`_, Sage Weil)
* common: crc32c: include acconfig.h to fix ceph_crc32c_aarch64() (`pr#15515 <https://github.com/ceph/ceph/pull/15515>`_, Kefu Chai)
* common: crush/CrushWrapper: fix has_incompat_choose_args (`pr#15218 <https://github.com/ceph/ceph/pull/15218>`_, Sage Weil)
* common: crush/CrushWrapper: fix has_incompat_choose_args() (`pr#15244 <https://github.com/ceph/ceph/pull/15244>`_, Sage Weil)
* common: denc: add encode/decode for basic_sstring (`pr#15135 <https://github.com/ceph/ceph/pull/15135>`_, Kefu Chai, Casey Bodley)
* common: get_process_name: use getprogname on bsd systems (`pr#15338 <https://github.com/ceph/ceph/pull/15338>`_, Mykola Golub)
* common: Improved CRC calculation for zero buffers (`pr#11966 <https://github.com/ceph/ceph/pull/11966>`_, Adam Kupczyk)
* common: include/lru.h: add const to member functions (`pr#15408 <https://github.com/ceph/ceph/pull/15408>`_, yonghengdexin735)
* common: include/rados: Fix typo in rados_ioctx_cct() doc (`pr#15220 <https://github.com/ceph/ceph/pull/15220>`_, Jos Collin)
* common: include: Redo some includes for FreeBSD (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15337 <https://github.com/ceph/ceph/pull/15337>`_, Willem Jan Withagen)
* common: int_types.h: remove hacks to workaround old systems (`pr#15069 <https://github.com/ceph/ceph/pull/15069>`_, Kefu Chai)
* common: librados,libradosstriper,test: migrate atomic_t to std::atomic (baragon) (`pr#14658 <https://github.com/ceph/ceph/pull/14658>`_, Jesse Williamson)
* common: libradosstriper: Add example code (`pr#15350 <https://github.com/ceph/ceph/pull/15350>`_, Logan Blyth)
* common: mempool: improve dump; fix buffer accounting bugs (`pr#15403 <https://github.com/ceph/ceph/pull/15403>`_, Sage Weil)
* common,mon: messenger,client,compressor: migrate atomic_t to std::atomic (`pr#14657 <https://github.com/ceph/ceph/pull/14657>`_, Jesse Williamson)
* common,mon: mon,crush: add 'osd crush swap-bucket' command (`pr#15072 <https://github.com/ceph/ceph/pull/15072>`_, Sage Weil)
* common,performance: buffer: allow buffers to be accounted in arbitrary mempools (`pr#15352 <https://github.com/ceph/ceph/pull/15352>`_, Sage Weil)
* common,performance: crc32c: Add ppc64le fast zero optimized assembly. (`pr#15100 <https://github.com/ceph/ceph/pull/15100>`_, Andrew Solomon)
* common,performance: inline_memory: optimized mem_is_zero for non-x64 (`pr#15307 <https://github.com/ceph/ceph/pull/15307>`_, Piotr Dałek)
* common,performance: kv/rocksdb: supports SliceParts interface (`pr#15058 <https://github.com/ceph/ceph/pull/15058>`_, Haomai Wang)
* common,performance: osd/OSDMap: make pg_temp more efficient (`pr#15291 <https://github.com/ceph/ceph/pull/15291>`_, Sage Weil)
* common: Remove redundant includes - 2 (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15169 <https://github.com/ceph/ceph/pull/15169>`_, Jos Collin)
* common: Remove redundant includes - 3 (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15204 <https://github.com/ceph/ceph/pull/15204>`_, Jos Collin)
* common: Remove redundant includes - 5 (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15267 <https://github.com/ceph/ceph/pull/15267>`_, Jos Collin)
* common: Remove redundant includes - 6 (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15299 <https://github.com/ceph/ceph/pull/15299>`_, Jos Collin)
* common: Remove redundant includes (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15042 <https://github.com/ceph/ceph/pull/15042>`_, Brad Hubbard)
* common: Remove redundant includes (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15086 <https://github.com/ceph/ceph/pull/15086>`_, Jos Collin)
* common,tests: ceph_test_rados_api_list: more fix LibRadosListNP.ListObjectsError (`issue#19963 <http://tracker.ceph.com/issues/19963>`_, `pr#15138 <https://github.com/ceph/ceph/pull/15138>`_, Sage Weil)
* common: xio: migrate atomic_t to std::atomic<> (`pr#15230 <https://github.com/ceph/ceph/pull/15230>`_, Jesse Williamson)
* core: ceph-disk: do not setup_statedir on trigger (`issue#19941 <http://tracker.ceph.com/issues/19941>`_, `pr#15410 <https://github.com/ceph/ceph/pull/15410>`_, Loic Dachary)
* core: compressor: add LZ4 support (`pr#15434 <https://github.com/ceph/ceph/pull/15434>`_, Haomai Wang)
* core: compressor: optimize header file dependency (`pr#15187 <https://github.com/ceph/ceph/pull/15187>`_, Brad Hubbard, Xiaowei Chen)
* core: crush, mon: make jewel the lower bound for client/crush compat for new clusters (`pr#15370 <https://github.com/ceph/ceph/pull/15370>`_, Sage Weil)
* core: erasure-code: optimize header file dependency (`pr#15172 <https://github.com/ceph/ceph/pull/15172>`_, Brad Hubbard, Xiaowei Chen)
* core: erasure-code: Remove duplicate of isa-l files (`pr#15372 <https://github.com/ceph/ceph/pull/15372>`_, Ganesh Mahalingam)
* core: filestore: migrate atomic_t to std::atomic<> (`pr#15228 <https://github.com/ceph/ceph/pull/15228>`_, Jesse Williamson)
* core: include/types.h, introduce host_to_ceph_errno (`pr#15496 <https://github.com/ceph/ceph/pull/15496>`_, Willem Jan Withagen)
* core: Install Pecan for FreeBSD (`pr#15610 <https://github.com/ceph/ceph/pull/15610>`_, Willem Jan Withagen)
* core,mgr: mgr/DaemonServer: stop spamming log with pg stats (`pr#15487 <https://github.com/ceph/ceph/pull/15487>`_, Sage Weil)
* core,mgr,mon: mon/PGMap: fix osd_epoch update when removing osd_stat (`issue#20208 <http://tracker.ceph.com/issues/20208>`_, `pr#15573 <https://github.com/ceph/ceph/pull/15573>`_, Sage Weil)
* core,mon: mon/LogMonitor: 'log last' command (`pr#15497 <https://github.com/ceph/ceph/pull/15497>`_, Sage Weil)
* core,mon: mon/OSDMonitor: cancel mapping job from update_from_paxos (`issue#20067 <http://tracker.ceph.com/issues/20067>`_, `pr#15320 <https://github.com/ceph/ceph/pull/15320>`_, Sage Weil)
* core,mon: mon/OSDMonitor: use up set instead of acting set in reweight_by_utilization (`pr#13802 <https://github.com/ceph/ceph/pull/13802>`_, Mingxin Liu)
* core,mon: mon/PGMap: call blocked requests ERR not WARN (`pr#15501 <https://github.com/ceph/ceph/pull/15501>`_, Sage Weil)
* core: mon/OSDMonitor: change info in 'osd failed' messages (`pr#15321 <https://github.com/ceph/ceph/pull/15321>`_, Sage Weil)
* core: mon,osd/OSDMap: a couple pg-upmap fixes (`pr#15319 <https://github.com/ceph/ceph/pull/15319>`_, Sage Weil)
* core: msg/async: avoid requeue racing with handle_write (`issue#20093 <http://tracker.ceph.com/issues/20093>`_, `pr#15324 <https://github.com/ceph/ceph/pull/15324>`_, Haomai Wang)
* core: osd,librados: add manifest, redirect (`pr#15325 <https://github.com/ceph/ceph/pull/15325>`_, Sage Weil)
* core: osd/OSDMap: improve upmap calculation (`issue#19818 <http://tracker.ceph.com/issues/19818>`_, `pr#14902 <https://github.com/ceph/ceph/pull/14902>`_, Sage Weil)
* core: osd/PG: drop pre-firefly compat_mode for choose_*_acting (`pr#15057 <https://github.com/ceph/ceph/pull/15057>`_, Sage Weil)
* core: osd/pglog: remove loop through empty collection (`pr#15121 <https://github.com/ceph/ceph/pull/15121>`_, J. Eric Ivancich)
* core,performance: msg/async: reduce write_lock contention (`pr#15092 <https://github.com/ceph/ceph/pull/15092>`_, Haomai Wang)
* core,rgw: qa: Removed all 'default_idle_timeout' due to chnage in rwg task (`pr#15420 <https://github.com/ceph/ceph/pull/15420>`_, Yuri Weinstein)
* core,rgw,tests: qa/rgw_snaps: move default_idle_timeout config under the client (`issue#20128 <http://tracker.ceph.com/issues/20128>`_, `pr#15400 <https://github.com/ceph/ceph/pull/15400>`_, Yehuda Sadeh)
* core: src/ceph.in: Use env(CEPH_DEV) to suppress noise from ceph (`pr#14746 <https://github.com/ceph/ceph/pull/14746>`_, Willem Jan Withagen)
* core,tests: ceph-disk: sensible default for block.db (`pr#15576 <https://github.com/ceph/ceph/pull/15576>`_, Loic Dachary)
* core,tests: qa/suites/rados/*/at-end: wait for healthy before scrubbing (`pr#15245 <https://github.com/ceph/ceph/pull/15245>`_, Sage Weil)
* core,tests: qa/suites/rados/singleton-nomsg/health-warnings: behave on ext4 (`issue#20043 <http://tracker.ceph.com/issues/20043>`_, `pr#15207 <https://github.com/ceph/ceph/pull/15207>`_, Sage Weil)
* core,tests: qa/suites/rados: temporarily remove scrub_test from basic/ until post-luminous (`issue#19935 <http://tracker.ceph.com/issues/19935>`_, `pr#15202 <https://github.com/ceph/ceph/pull/15202>`_, Sage Weil)
* core,tests: qa/suites/upgrade/kraken-x: enable experimental for bluestore (`pr#15359 <https://github.com/ceph/ceph/pull/15359>`_, Sage Weil)
* core,tests: qa/workunits/cephtool/test.sh: fix osd full health detail grep (`issue#20187 <http://tracker.ceph.com/issues/20187>`_, `pr#15494 <https://github.com/ceph/ceph/pull/15494>`_, Sage Weil)
* core,tests: qa/workunits/rados/test_health_warning: misc fixes (`issue#19990 <http://tracker.ceph.com/issues/19990>`_, `pr#15201 <https://github.com/ceph/ceph/pull/15201>`_, Sage Weil)
* core,tests: test/osd/TestRados.cc: run set-redirect test after finishing setup (`issue#20114 <http://tracker.ceph.com/issues/20114>`_, `pr#15385 <https://github.com/ceph/ceph/pull/15385>`_, Myoungwon Oh)
* core,tools: osdmaptool: require --upmap-save before modifying input osdmap (`pr#15247 <https://github.com/ceph/ceph/pull/15247>`_, Sage Weil)
* crush: add missing tunable in tests (`pr#15412 <https://github.com/ceph/ceph/pull/15412>`_, Loic Dachary)
* crush: encode can override weights with weight set (`issue#19836 <http://tracker.ceph.com/issues/19836>`_, `pr#15002 <https://github.com/ceph/ceph/pull/15002>`_, Loic Dachary)
* crush: optimize header file dependency (`pr#9307 <https://github.com/ceph/ceph/pull/9307>`_, Xiaowei Chen)
* crush: update choose_args when items are added/removed (`pr#15311 <https://github.com/ceph/ceph/pull/15311>`_, Loic Dachary)
* doc: add descriptions for mon/mgr options (`pr#15032 <https://github.com/ceph/ceph/pull/15032>`_, Kefu Chai)
* doc: add FreeBSD manual install (`pr#14941 <https://github.com/ceph/ceph/pull/14941>`_, Willem Jan Withagen)
* doc: add new cn ceph mirror to doc and mirroring (`pr#15089 <https://github.com/ceph/ceph/pull/15089>`_, Shengjing Zhu)
* doc: add rados xattr commands to manpage (`pr#15362 <https://github.com/ceph/ceph/pull/15362>`_, Andreas Gerstmayr)
* doc: add README to dmclock subdir to inform developers it's a git subtree (`pr#15386 <https://github.com/ceph/ceph/pull/15386>`_, J. Eric Ivancich)
* doc: AUTHORS: update with release manager, backport team (`pr#15391 <https://github.com/ceph/ceph/pull/15391>`_, Sage Weil)
* doc: Change the default values of some OSD options (`issue#20199 <http://tracker.ceph.com/issues/20199>`_, `pr#15566 <https://github.com/ceph/ceph/pull/15566>`_, Bara Ancincova)
* doc: describe CephFS max_file_size (`pr#15287 <https://github.com/ceph/ceph/pull/15287>`_, Ken Dreyer)
* doc: dev improve the s3tests doc to reflect current scripts (`pr#15180 <https://github.com/ceph/ceph/pull/15180>`_, Abhishek Lekshmanan)
* doc: doc/cephfs: mention RADOS object size limit (`pr#15550 <https://github.com/ceph/ceph/pull/15550>`_, John Spray)
* doc: doc/release-notes: update which jewel version does sortbitwise warning (`pr#15209 <https://github.com/ceph/ceph/pull/15209>`_, Sage Weil)
* doc: doc/rgw: remove fastcgi page and sample configs (`pr#15133 <https://github.com/ceph/ceph/pull/15133>`_, Casey Bodley)
* doc: Documentation Fixes for http://tracker.ceph.com/issues/19879 (`issue#20057 <http://tracker.ceph.com/issues/20057>`_, `issue#19879 <http://tracker.ceph.com/issues/19879>`_, `pr#15606 <https://github.com/ceph/ceph/pull/15606>`_, Sameer Tiwari)
* doc: document perf historgrams (`pr#15150 <https://github.com/ceph/ceph/pull/15150>`_, Piotr Dałek)
* doc: Document RGW quota cache options (`issue#18747 <http://tracker.ceph.com/issues/18747>`_, `pr#13395 <https://github.com/ceph/ceph/pull/13395>`_, Daniel Gryniewicz)
* doc: fix broken link in erasure-code.rst (`issue#19972 <http://tracker.ceph.com/issues/19972>`_, `pr#15143 <https://github.com/ceph/ceph/pull/15143>`_, MinSheng Lin)
* doc: fix factual inaccuracy in doc/architecture.rst (`pr#15235 <https://github.com/ceph/ceph/pull/15235>`_, Nathan Cutler, Sage Weil)
* doc: fixing an error in 12.0.3 release notes (`pr#15195 <https://github.com/ceph/ceph/pull/15195>`_, Abhishek Lekshmanan)
* doc: fix syntax on code snippets in cephfs/multimds (`pr#15499 <https://github.com/ceph/ceph/pull/15499>`_, John Spray)
* doc: kill some broken links (`pr#15203 <https://github.com/ceph/ceph/pull/15203>`_, liuchang0812)
* doc: mailmap: Leo Zhang infomation and affiliation (`pr#15145 <https://github.com/ceph/ceph/pull/15145>`_, Leo Zhang)
* doc: mention certain conf vars should be in global (`pr#15119 <https://github.com/ceph/ceph/pull/15119>`_, Ali Maredia)
* doc: Merge pull request from stiwari/wip-19879 (`issue#19879 <http://tracker.ceph.com/issues/19879>`_, `pr#15609 <https://github.com/ceph/ceph/pull/15609>`_, Sameer Tiwari)
* doc: minor fixes in radosgw/ (`pr#15103 <https://github.com/ceph/ceph/pull/15103>`_, Drunkard Zhang)
* doc: PendingReleaseNotes: notes on whiteouts vs pgnls (`pr#15575 <https://github.com/ceph/ceph/pull/15575>`_, Sage Weil)
* doc: PendingReleaseNotes: warning about 'osd rm ...' and #19119 (`issue#19119 <http://tracker.ceph.com/issues/19119>`_, `pr#13731 <https://github.com/ceph/ceph/pull/13731>`_, Sage Weil)
* doc: release-notes clarify about rgw encryption (`pr#14800 <https://github.com/ceph/ceph/pull/14800>`_, Abhishek Lekshmanan)
* doc: release notes for v12.0.3 (dev) (`pr#15090 <https://github.com/ceph/ceph/pull/15090>`_, Abhishek Lekshmanan)
* docs document "osd recovery max single start" setting (`issue#17396 <http://tracker.ceph.com/issues/17396>`_, `pr#15275 <https://github.com/ceph/ceph/pull/15275>`_, Ken Dreyer)
* doc: typo fixes on hyperlink/words (`pr#15144 <https://github.com/ceph/ceph/pull/15144>`_, Drunkard Zhang)
* doc: update sample explaning "%" operator in test suites (`pr#15511 <https://github.com/ceph/ceph/pull/15511>`_, Kefu Chai)
* doc: Update some RGW documentation (`pr#15175 <https://github.com/ceph/ceph/pull/15175>`_, Jens Rosenboom)
* doc: update the usage of 'ceph-deploy purge' (`pr#15080 <https://github.com/ceph/ceph/pull/15080>`_, Yu Shengzuo)
* doc: use do_cmake.sh instead of `cmake ..` (`pr#15110 <https://github.com/ceph/ceph/pull/15110>`_, Kefu Chai)
* librbd: discard related IO should skip op if object non-existent (`issue#19962 <http://tracker.ceph.com/issues/19962>`_, `pr#15239 <https://github.com/ceph/ceph/pull/15239>`_, Mykola Golub)
* librbd: do not raise an error if trash list returns -ENOENT (`pr#15085 <https://github.com/ceph/ceph/pull/15085>`_, runsisi)
* librbd: filter expected error codes from is_exclusive_lock_owner (`issue#20182 <http://tracker.ceph.com/issues/20182>`_, `pr#15483 <https://github.com/ceph/ceph/pull/15483>`_, Jason Dillaman)
* librbd: fix valgrind errors and ensure tests detect future leaks (`pr#15415 <https://github.com/ceph/ceph/pull/15415>`_, Jason Dillaman)
* librbd: optimize copy-up to add hints only once to object op (`issue#19875 <http://tracker.ceph.com/issues/19875>`_, `pr#15037 <https://github.com/ceph/ceph/pull/15037>`_, Mykola Golub)
* librbd: potential read IO hang when image is flattened (`issue#19832 <http://tracker.ceph.com/issues/19832>`_, `pr#15234 <https://github.com/ceph/ceph/pull/15234>`_, Jason Dillaman)
* librbd: reacquire lock should update lock owner client id (`issue#19929 <http://tracker.ceph.com/issues/19929>`_, `pr#15093 <https://github.com/ceph/ceph/pull/15093>`_, Jason Dillaman)
* librbd: reduce potential of erroneous blacklisting on image close (`issue#19970 <http://tracker.ceph.com/issues/19970>`_, `pr#15162 <https://github.com/ceph/ceph/pull/15162>`_, Jason Dillaman)
* librbd: remove unused rbd_image_options_t ostream operator (`pr#15443 <https://github.com/ceph/ceph/pull/15443>`_, Mykola Golub)
* mds: change the type of data_pools (`pr#15278 <https://github.com/ceph/ceph/pull/15278>`_, Vicente Cheng)
* mds: check export pin during replay (`issue#20039 <http://tracker.ceph.com/issues/20039>`_, `pr#15205 <https://github.com/ceph/ceph/pull/15205>`_, Patrick Donnelly)
* mds: fix CDir::merge() for mds_debug_auth_pins (`issue#19946 <http://tracker.ceph.com/issues/19946>`_, `pr#15130 <https://github.com/ceph/ceph/pull/15130>`_, "Yan, Zheng")
* mds: fix client ID truncation (`pr#15258 <https://github.com/ceph/ceph/pull/15258>`_, Henry Chang)
* mds: limit client writable range increment (`issue#19955 <http://tracker.ceph.com/issues/19955>`_, `pr#15131 <https://github.com/ceph/ceph/pull/15131>`_, "Yan, Zheng")
* mds: miscellaneous multimds fixes (`pr#14550 <https://github.com/ceph/ceph/pull/14550>`_, "Yan, Zheng")
* mds: Pass empty string to clear mantle balancer (`issue#20076 <http://tracker.ceph.com/issues/20076>`_, `pr#15282 <https://github.com/ceph/ceph/pull/15282>`_, Zhi Zhang)
* mds: properly create aux subtrees for pinned directory (`issue#20083 <http://tracker.ceph.com/issues/20083>`_, `pr#15300 <https://github.com/ceph/ceph/pull/15300>`_, "Yan, Zheng")
* mgr: ceph-create-keys: update client.admin if it already exists (`issue#19940 <http://tracker.ceph.com/issues/19940>`_, `pr#15112 <https://github.com/ceph/ceph/pull/15112>`_, John Spray)
* mgr: ceph: introduce "tell x help" subcommand (`issue#19885 <http://tracker.ceph.com/issues/19885>`_, `pr#15111 <https://github.com/ceph/ceph/pull/15111>`_, liuchang0812)
* mgr: ceph-mgr: Implement new pecan-based rest api (`pr#14457 <https://github.com/ceph/ceph/pull/14457>`_, Boris Ranto)
* mgr: cleanup, stop clients sending in perf counters (`pr#15578 <https://github.com/ceph/ceph/pull/15578>`_, John Spray)
* mgr: dashboard code cleanup (`pr#15577 <https://github.com/ceph/ceph/pull/15577>`_, John Spray)
* mgr: dashboard GUI module (`pr#14946 <https://github.com/ceph/ceph/pull/14946>`_, John Spray, Dan Mick)
* mgr: load modules in separate python sub-interpreters (`pr#14971 <https://github.com/ceph/ceph/pull/14971>`_, Tim Serong)
* mgr: Mark session connections down on shutdown (`issue#19900 <http://tracker.ceph.com/issues/19900>`_, `pr#15192 <https://github.com/ceph/ceph/pull/15192>`_, Brad Hubbard)
* mgr: mgr/DaemonServer.cc: log daemon type string as well as id (`pr#15560 <https://github.com/ceph/ceph/pull/15560>`_, Dan Mick)
* mgr: mgr/MgrStandby: prevent use-after-free on just-shut-down Mgr (`issue#19595 <http://tracker.ceph.com/issues/19595>`_, `pr#15297 <https://github.com/ceph/ceph/pull/15297>`_, Sage Weil)
* mgr: mgr/MgrStandby: respawn when deactivated (`issue#19595 <http://tracker.ceph.com/issues/19595>`_, `issue#19549 <http://tracker.ceph.com/issues/19549>`_, `pr#15557 <https://github.com/ceph/ceph/pull/15557>`_, Sage Weil)
* mgr: mgr,osd: ceph-mgr --help, unify usage text of other daemons (`pr#15176 <https://github.com/ceph/ceph/pull/15176>`_, Tim Serong)
* mgr,mon: mon,mgr: extricate PGmap from monitor (`issue#20067 <http://tracker.ceph.com/issues/20067>`_, `issue#20174 <http://tracker.ceph.com/issues/20174>`_, `issue#20050 <http://tracker.ceph.com/issues/20050>`_, `pr#15073 <https://github.com/ceph/ceph/pull/15073>`_, Kefu Chai, Sage Weil, Greg Farnum)
* mgr,mon: mon/MgrMonitor: add 'mgr dump [epoch]' command (`pr#15158 <https://github.com/ceph/ceph/pull/15158>`_, Sage Weil)
* mgr: optimize DaemonStateIndex::cull() a little bit (`pr#14967 <https://github.com/ceph/ceph/pull/14967>`_, Kefu Chai)
* mgr: pybind/mgr/dashboard: monkeypatch os.exit to stop cherrypy from taking down mgr (`issue#20216 <http://tracker.ceph.com/issues/20216>`_, `pr#15588 <https://github.com/ceph/ceph/pull/15588>`_, Sage Weil)
* mgr: pybind/mgr: Delete `rest` module (`pr#15429 <https://github.com/ceph/ceph/pull/15429>`_, John Spray)
* mgr: pybind/mgr/restful: improve cert handling; work with vstart (`pr#15405 <https://github.com/ceph/ceph/pull/15405>`_, Sage Weil)
* mon: add crush type down health warnings (`pr#14914 <https://github.com/ceph/ceph/pull/14914>`_, Neha Ojha)
* mon: Add override for FsNewHandler::handle() (`pr#15331 <https://github.com/ceph/ceph/pull/15331>`_, yonghengdexin735)
* mon: cleanups (`pr#15272 <https://github.com/ceph/ceph/pull/15272>`_, Kefu Chai)
* mon: delete useless function definition (`pr#15188 <https://github.com/ceph/ceph/pull/15188>`_, shiqi)
* mon: don't prefix mgr summary with epoch number (`pr#15512 <https://github.com/ceph/ceph/pull/15512>`_, John Spray)
* mon: fix accesing pending_fsmap from peon (`issue#20040 <http://tracker.ceph.com/issues/20040>`_, `pr#15213 <https://github.com/ceph/ceph/pull/15213>`_, John Spray)
* mon: fix a few bugs with the osd health reporting (`pr#15179 <https://github.com/ceph/ceph/pull/15179>`_, Sage Weil)
* mon: Fixed typo in function comment blocks and in other comments (`pr#15304 <https://github.com/ceph/ceph/pull/15304>`_, linbing)
* mon: Fixed typo in @post of _active() (`pr#15191 <https://github.com/ceph/ceph/pull/15191>`_, Linbing)
* mon: fix mon_keyvaluedb application (`pr#15059 <https://github.com/ceph/ceph/pull/15059>`_, Sage Weil)
* mon: it's no need to get pg action_primary osd twice in pg scrub (`pr#15313 <https://github.com/ceph/ceph/pull/15313>`_, linbing)
* mon: mon/MgrMonitor: send digests only if is_active() (`pr#15109 <https://github.com/ceph/ceph/pull/15109>`_, Kefu Chai)
* mon: mon/MonClient: cancel pending commands on shutdown (`issue#20051 <http://tracker.ceph.com/issues/20051>`_, `pr#15227 <https://github.com/ceph/ceph/pull/15227>`_, Kefu Chai, Sage Weil)
* mon: {mon,osd,mds} {versions,count-metadata} (`pr#15436 <https://github.com/ceph/ceph/pull/15436>`_, Sage Weil)
* mon: mon/PGMap: show %used in formatted output (`issue#20123 <http://tracker.ceph.com/issues/20123>`_, `pr#15387 <https://github.com/ceph/ceph/pull/15387>`_, Joao Eduardo Luis)
* mon: Removed unnecessary function declaration in MDSMonitor.h (`pr#15374 <https://github.com/ceph/ceph/pull/15374>`_, yonghengdexin735)
* mon: replace osds with `osd destroy` and `osd new` (`pr#14074 <https://github.com/ceph/ceph/pull/14074>`_, Joao Eduardo Luis, Sage Weil)
* mon: revise "ceph status" output (`pr#15396 <https://github.com/ceph/ceph/pull/15396>`_, John Spray)
* mon: show io status quickly if no update in a long period (`pr#14176 <https://github.com/ceph/ceph/pull/14176>`_, Mingxin Liu)
* mon: track features from connect clients, and use it to gate set-require-min-compat-client (`pr#15371 <https://github.com/ceph/ceph/pull/15371>`_, Sage Weil)
* mon: trim the creating_pgs after updating it with pgmap (`issue#20067 <http://tracker.ceph.com/issues/20067>`_, `pr#15318 <https://github.com/ceph/ceph/pull/15318>`_, Kefu Chai)
* msg: do not enable client-side binding by default (`issue#20049 <http://tracker.ceph.com/issues/20049>`_, `pr#15392 <https://github.com/ceph/ceph/pull/15392>`_, Jason Dillaman)
* msg: don't set msgr addr when disabing client bind (`pr#15243 <https://github.com/ceph/ceph/pull/15243>`_, Haomai Wang)
* msgr: msg/async: Lower down the AsyncMessenger's standby warning from debug (`pr#15242 <https://github.com/ceph/ceph/pull/15242>`_, Pan Liu)
* msgr: msg/async: remove false alert "assert" (`pr#15288 <https://github.com/ceph/ceph/pull/15288>`_, Haomai Wang)
* osd: don't leak pgrefs or reservations in SnapTrimmer (`issue#19931 <http://tracker.ceph.com/issues/19931>`_, `pr#15214 <https://github.com/ceph/ceph/pull/15214>`_, Greg Farnum)
* osd: fix argument-dependent lookup of swap() (`pr#15124 <https://github.com/ceph/ceph/pull/15124>`_, Casey Bodley)
* osd: fix past_intervals base case by adding epoch_pool_created to pg_history_t (`issue#19877 <http://tracker.ceph.com/issues/19877>`_, `pr#14989 <https://github.com/ceph/ceph/pull/14989>`_, Sage Weil)
* osd: hdd vs ssd defaults for osd op thread pool (`pr#15422 <https://github.com/ceph/ceph/pull/15422>`_, Sage Weil)
* osd: Implement asynchronous recovery sleep (`pr#15212 <https://github.com/ceph/ceph/pull/15212>`_, Neha Ojha)
* osd: Move scrub sleep timer to osdservice (`issue#19986 <http://tracker.ceph.com/issues/19986>`_, `pr#15217 <https://github.com/ceph/ceph/pull/15217>`_, Brad Hubbard)
* osd: Object level shard errors are tracked and used if no auth available (`issue#20089 <http://tracker.ceph.com/issues/20089>`_, `pr#15397 <https://github.com/ceph/ceph/pull/15397>`_, David Zafman)
* osd: osd/OSDMap.cc: check if osd is out in subtree_type_is_down (`issue#19989 <http://tracker.ceph.com/issues/19989>`_, `pr#15250 <https://github.com/ceph/ceph/pull/15250>`_, Neha Ojha)
* osd: 'osd tree in|out|up|down' to filter tree results (`pr#15294 <https://github.com/ceph/ceph/pull/15294>`_, Sage Weil)
* osd: reduce buffer pinning from EC entries (`pr#15120 <https://github.com/ceph/ceph/pull/15120>`_, Sage Weil)
* osd: reduce map cache size (`pr#15292 <https://github.com/ceph/ceph/pull/15292>`_, Sage Weil)
* osd: reduce rados_max_object_size from 100 GB -> 128 MB (`pr#15520 <https://github.com/ceph/ceph/pull/15520>`_, Sage Weil)
* osd: rename osd -> osd_pglog; include pglog-related bufferlists (`pr#15531 <https://github.com/ceph/ceph/pull/15531>`_, Sage Weil)
* osd: Return early on shutdown (`issue#19900 <http://tracker.ceph.com/issues/19900>`_, `pr#15345 <https://github.com/ceph/ceph/pull/15345>`_, Brad Hubbard)
* osd: take PGRef for recovery sleep wakeup event (`issue#20226 <http://tracker.ceph.com/issues/20226>`_, `pr#15582 <https://github.com/ceph/ceph/pull/15582>`_, Sage Weil)
* osd: when osd in not in failure_pending, we don't need to get osd inst from osdmap. (`pr#15558 <https://github.com/ceph/ceph/pull/15558>`_, linbing)
* osd: When scrub finds an attr error mark shard inconsistent (`issue#20089 <http://tracker.ceph.com/issues/20089>`_, `pr#15368 <https://github.com/ceph/ceph/pull/15368>`_, David Zafman)
* performance: common/config_opts.h: Lower HDD throttle cost. (`pr#15485 <https://github.com/ceph/ceph/pull/15485>`_, Mark Nelson)
* performance: denc: add need_contiguous to denc_traits (`pr#15224 <https://github.com/ceph/ceph/pull/15224>`_, Kefu Chai)
* pybind: pybind/ceph_argparse: fix empty string check (`issue#20135 <http://tracker.ceph.com/issues/20135>`_, `pr#15500 <https://github.com/ceph/ceph/pull/15500>`_, Sage Weil)
* pybind: pybind/ceph_daemon.py: fix Termsize.update (`pr#15253 <https://github.com/ceph/ceph/pull/15253>`_, Kefu Chai)
* pybind: pybind/rados: avoid call free() on invalid pointer (`pr#15159 <https://github.com/ceph/ceph/pull/15159>`_, Mingxin Liu)
* pybind,rbd: pybind/rbd: OSError should be picklable (`issue#20223 <http://tracker.ceph.com/issues/20223>`_, `pr#15574 <https://github.com/ceph/ceph/pull/15574>`_, Jason Dillaman)
* pybind: support mon target in pybind (`pr#15409 <https://github.com/ceph/ceph/pull/15409>`_, liuchang0812)
* rbd-mirror: coordinate image syncs with leader (`issue#18789 <http://tracker.ceph.com/issues/18789>`_, `pr#14745 <https://github.com/ceph/ceph/pull/14745>`_, Mykola Golub)
* rbd-mirror: lock loss during sync should wait for in-flight copies (`pr#15532 <https://github.com/ceph/ceph/pull/15532>`_, Jason Dillaman)
* rbd-mirror: permit release of local image exclusive lock after force promotion (`issue#18963 <http://tracker.ceph.com/issues/18963>`_, `pr#15140 <https://github.com/ceph/ceph/pull/15140>`_, Jason Dillaman)
* rbd: properly decode features when using image name optional (`issue#20185 <http://tracker.ceph.com/issues/20185>`_, `pr#15492 <https://github.com/ceph/ceph/pull/15492>`_, Jason Dillaman)
* rbd: pybind/rbd: fix crash if more than 1024 images in trash bin (`pr#15134 <https://github.com/ceph/ceph/pull/15134>`_, runsisi)
* rbd: rbd/bench: fix write gaps when doing sequential writes with io-threads > 1 (`pr#15206 <https://github.com/ceph/ceph/pull/15206>`_, Igor Fedotov)
* rbd: removed hardcoded default pool (`pr#15518 <https://github.com/ceph/ceph/pull/15518>`_, Jason Dillaman)
* rbd,tests: qa: krbd discard/zeroout tests (`pr#15388 <https://github.com/ceph/ceph/pull/15388>`_, Ilya Dryomov)
* rbd,tests: qa/suites/krbd: unmap subsuite needs straw buckets (`pr#15290 <https://github.com/ceph/ceph/pull/15290>`_, Ilya Dryomov)
* rbd,tests: qa: update krbd_data_pool.sh to match the new rados ls behavior (`pr#15594 <https://github.com/ceph/ceph/pull/15594>`_, Ilya Dryomov)
* rbd,tests: test/librbd: unit tests cleanup (`pr#15113 <https://github.com/ceph/ceph/pull/15113>`_, Mykola Golub)
* rdma: msg/async/rdma: Add DSCP support (`pr#15484 <https://github.com/ceph/ceph/pull/15484>`_, Sarit Zubakov)
* rdma: msg/async: Revert RDMA-CM (`pr#15262 <https://github.com/ceph/ceph/pull/15262>`_, Amir Vadai)
* rgw: Adding code to create tenanted user for s3 bucket policy tests. (`pr#15028 <https://github.com/ceph/ceph/pull/15028>`_, Pritha Srivastava)
* rgw: add "rgw_verify_ssl" config (`pr#15301 <https://github.com/ceph/ceph/pull/15301>`_, Shasha Lu)
* rgw: add the Vim's modeline into rgw_orphan.cc. (`pr#15431 <https://github.com/ceph/ceph/pull/15431>`_, Radoslaw Zarzynski)
* rgw: bucket index check in radosgw-admin removes valid index. (`issue#18470 <http://tracker.ceph.com/issues/18470>`_, `pr#12851 <https://github.com/ceph/ceph/pull/12851>`_, Zhang Shaowen)
* rgw: datalog trim and mdlog trim handles the result returned by osd incorrectly. (`issue#20190 <http://tracker.ceph.com/issues/20190>`_, `pr#15507 <https://github.com/ceph/ceph/pull/15507>`_, Zhang Shaowen)
* rgw: display more info when using radosgw-admin bucket stats (`pr#15256 <https://github.com/ceph/ceph/pull/15256>`_, fang.yuxiang)
* rgw: drop asio/{yield,coroutine}.hpp replacements (`pr#15413 <https://github.com/ceph/ceph/pull/15413>`_, Kefu Chai)
* rgw: drop using std ns in header files and other cleanups (`pr#15137 <https://github.com/ceph/ceph/pull/15137>`_, Abhishek Lekshmanan)
* rgw: dynamic resharding (`pr#15493 <https://github.com/ceph/ceph/pull/15493>`_, Yehuda Sadeh, Orit Wasserman)
* rgw: fix 'gc list --include-all' command infinite loop the first items (`issue#19978 <http://tracker.ceph.com/issues/19978>`_, `pr#12774 <https://github.com/ceph/ceph/pull/12774>`_, Shasha Lu, fang yuxiang)
* rgw: fix lc list failure when shards not be all created (`issue#19898 <http://tracker.ceph.com/issues/19898>`_, `pr#15025 <https://github.com/ceph/ceph/pull/15025>`_, Jiaying Ren)
* rgw: fix radosgw-admin retcode (`pr#15257 <https://github.com/ceph/ceph/pull/15257>`_, Shasha Lu)
* rgw: fix test_multi.py default config file path (`pr#15306 <https://github.com/ceph/ceph/pull/15306>`_, Jiaying Ren)
* rgw: fix X-Object-Meta-Static-Large-Object in SLO download (`issue#19951 <http://tracker.ceph.com/issues/19951>`_, `pr#15045 <https://github.com/ceph/ceph/pull/15045>`_, Shasha Lu)
* rgw: metadata search part 2 (`pr#14351 <https://github.com/ceph/ceph/pull/14351>`_, Yehuda Sadeh)
* rgw: migrate atomic_t to std::atomic<> (`pr#15001 <https://github.com/ceph/ceph/pull/15001>`_, Jesse Williamson)
* rgw: optimize data sync. Add zones_trace in log to avoid needless sync. (`issue#19219 <http://tracker.ceph.com/issues/19219>`_, `pr#13851 <https://github.com/ceph/ceph/pull/13851>`_, Zhang Shaowen)
* rgw: optimize generating torrent file. Object data won't stay in memory now. (`pr#15153 <https://github.com/ceph/ceph/pull/15153>`_, Zhang Shaowen)
* rgw: pass authentication domain to civetweb (`issue#17657 <http://tracker.ceph.com/issues/17657>`_, `pr#12861 <https://github.com/ceph/ceph/pull/12861>`_, Abhishek Lekshmanan)
* rgw: polymorphic error codes (`pr#10690 <https://github.com/ceph/ceph/pull/10690>`_, Pritha Srivastava, Marcus Watts)
* rgw: remove fastcgi from default rgw frontends (`pr#15098 <https://github.com/ceph/ceph/pull/15098>`_, Casey Bodley)
* rgw: rename s3_code to err_code for swift (`pr#12300 <https://github.com/ceph/ceph/pull/12300>`_, Guo Zhandong)
* rgw: return the version id in get object and object metadata request. (`issue#19370 <http://tracker.ceph.com/issues/19370>`_, `pr#14117 <https://github.com/ceph/ceph/pull/14117>`_, Zhang Shaowen)
* rgw: rgw-admin: fix bucket limit check argparse, div(0) (`pr#15316 <https://github.com/ceph/ceph/pull/15316>`_, Matt Benjamin)
* rgw: rgw_common: use string::npos for the results of str.find (`pr#14341 <https://github.com/ceph/ceph/pull/14341>`_, Abhishek Lekshmanan)
* rgw: rgw_file: add lock protection for readdir against gc (`issue#20121 <http://tracker.ceph.com/issues/20121>`_, `pr#15329 <https://github.com/ceph/ceph/pull/15329>`_, Gui Hecheng)
* rgw: rgw_file cleanup names (`pr#15568 <https://github.com/ceph/ceph/pull/15568>`_, Gui Hecheng)
* rgw: rgw_file: fix flags set on unsuccessful unlink (`pr#15222 <https://github.com/ceph/ceph/pull/15222>`_, Gui Hecheng)
* rgw: rgw_file: release rgw_fh lock and ref on ENOTEMPTY (`issue#20061 <http://tracker.ceph.com/issues/20061>`_, `pr#15246 <https://github.com/ceph/ceph/pull/15246>`_, Matt Benjamin)
* rgw: rgw_file: removed extra rele() on fs in rgw_umount() (`pr#15152 <https://github.com/ceph/ceph/pull/15152>`_, Gui Hecheng)
* rgw: rgw_file: remove hidden uxattr objects from buckets on delete (`issue#20045 <http://tracker.ceph.com/issues/20045>`_, `pr#15210 <https://github.com/ceph/ceph/pull/15210>`_, Matt Benjamin)
* rgw: rgw_file: remove post-unlink lookup check (`issue#20047 <http://tracker.ceph.com/issues/20047>`_, `pr#15216 <https://github.com/ceph/ceph/pull/15216>`_, Matt Benjamin)
* rgw: rgw_file: replace raw fs->fh_lru.unref with predefined fs->unref (`pr#15541 <https://github.com/ceph/ceph/pull/15541>`_, Gui Hecheng)
* rgw: rgw_file: store bucket uxattrs on the bucket (`issue#20082 <http://tracker.ceph.com/issues/20082>`_, `pr#15293 <https://github.com/ceph/ceph/pull/15293>`_, Matt Benjamin)
* rgw: rgw_file: v3: fix write-timer action (`issue#19932 <http://tracker.ceph.com/issues/19932>`_, `pr#15097 <https://github.com/ceph/ceph/pull/15097>`_, Matt Benjamin)
* rgw: rgw_rados: create sync module instances only if run_sync_thread is set (`issue#19830 <http://tracker.ceph.com/issues/19830>`_, `pr#14994 <https://github.com/ceph/ceph/pull/14994>`_, Abhishek Lekshmanan)
* rgw: rgw/rgw_swift_auth.cc: using string::back() instead as the C++11 recommend (`pr#14827 <https://github.com/ceph/ceph/pull/14827>`_, liuyuhong)
|
||||
* rgw: segment fault when shard id out of range (`issue#19732 <http://tracker.ceph.com/issues/19732>`_, `pr#14389 <https://github.com/ceph/ceph/pull/14389>`_, redickwang)
|
||||
* rgw: set object accounted size correctly (`issue#20071 <http://tracker.ceph.com/issues/20071>`_, `pr#14950 <https://github.com/ceph/ceph/pull/14950>`_, fang yuxiang)
|
||||
* rgw: set placement rule properly (`pr#15221 <https://github.com/ceph/ceph/pull/15221>`_, fang.yuxiang)
|
||||
* rgw: support certain archaic and antiquated distributions (`pr#15498 <https://github.com/ceph/ceph/pull/15498>`_, Adam C. Emerson)
|
||||
* rgw,tests: qa/rgw: add multisite suite to configure and run multisite tests (`pr#14688 <https://github.com/ceph/ceph/pull/14688>`_, Casey Bodley)
|
||||
* rgw,tests: qa/rgw: remove apache/fastcgi and radosgw-agent tests (`pr#15184 <https://github.com/ceph/ceph/pull/15184>`_, Casey Bodley)
|
||||
* rgw: Turn off fcgi as a frontend (`issue#16784 <http://tracker.ceph.com/issues/16784>`_, `pr#15070 <https://github.com/ceph/ceph/pull/15070>`_, Thomas Serlin)
|
||||
* rgw: use get_data_extra_pool() when get extra pool (`issue#20064 <http://tracker.ceph.com/issues/20064>`_, `pr#15219 <https://github.com/ceph/ceph/pull/15219>`_, fang yuxiang)
|
||||
* rgw: use pre-defined calls to replace raw flag operation (`pr#15107 <https://github.com/ceph/ceph/pull/15107>`_, Gui Hecheng)
|
||||
* tests: Add integration tests for admin socket output (`pr#15223 <https://github.com/ceph/ceph/pull/15223>`_, Brad Hubbard)
|
||||
* tests: ceph-disk: add setting for external py-modules for tox-testing (`pr#15433 <https://github.com/ceph/ceph/pull/15433>`_, Willem Jan Withagen)
|
||||
* tests: Check make_writeable() return value (`pr#15266 <https://github.com/ceph/ceph/pull/15266>`_, zhanglei)
|
||||
* tests: config_opts: drop unused opts (`pr#15031 <https://github.com/ceph/ceph/pull/15031>`_, Kefu Chai)
|
||||
* tests: qa: add task for dnsmasq configuration (`pr#15071 <https://github.com/ceph/ceph/pull/15071>`_, Casey Bodley)
|
||||
* tests: qa: split test_tiering into smaller pieces (`pr#15146 <https://github.com/ceph/ceph/pull/15146>`_, Kefu Chai)
|
||||
* tests: qa/suites/rados: fix ec thrashing (`pr#15087 <https://github.com/ceph/ceph/pull/15087>`_, Sage Weil)
|
||||
* tests: qa/suites/rados/singleton-nomsgr: fix syntax (`pr#15276 <https://github.com/ceph/ceph/pull/15276>`_, Sage Weil)
|
||||
* tests: qa/suites/rados/thrash: make sure osds have map before legacy scrub (`pr#15117 <https://github.com/ceph/ceph/pull/15117>`_, Sage Weil)
|
||||
* tests: qa/suites/rados/upgrade: restart mds (`pr#15517 <https://github.com/ceph/ceph/pull/15517>`_, Sage Weil)
|
||||
* tests: qa/tasks/ceph_manager: 'ceph $service tell ...' is obsolete (`pr#15252 <https://github.com/ceph/ceph/pull/15252>`_, Sage Weil)
|
||||
* tests: qa/tasks/rebuild_mondb: grant "mgr:allow *" to client.admin (`issue#19439 <http://tracker.ceph.com/issues/19439>`_, `pr#14284 <https://github.com/ceph/ceph/pull/14284>`_, Kefu Chai)
|
||||
* tests: qa/tasks/repair_test: unset flags we set (`pr#15296 <https://github.com/ceph/ceph/pull/15296>`_, Sage Weil)
|
||||
* tests: qa/workunits/ceph-helpers.sh: use syntax understood by jq 1.3 (`pr#15530 <https://github.com/ceph/ceph/pull/15530>`_, Kefu Chai)
|
||||
* tests: Rename FileJournal object to distinguish (`pr#15279 <https://github.com/ceph/ceph/pull/15279>`_, Jos Collin)
|
||||
* tests: test/crush: silence warnings from -Walloc-size-larger-than= and -Wstringop-overflow= (`pr#15173 <https://github.com/ceph/ceph/pull/15173>`_, Jos Collin)
|
||||
* tests: test: migrate atomic_t to std::atomic (`pr#14655 <https://github.com/ceph/ceph/pull/14655>`_, Jesse Williamson)
|
||||
* tests: test/msgr: silence warnings from -Wsign-compare (`pr#15356 <https://github.com/ceph/ceph/pull/15356>`_, Jos Collin)
|
||||
* tests: test/msgr: silence warnings from -Wsign-compare (`pr#15570 <https://github.com/ceph/ceph/pull/15570>`_, Jos Collin)
|
||||
* tests: test/objectstore: Check apply_transaction() return values (`pr#15171 <https://github.com/ceph/ceph/pull/15171>`_, zhanglei)
|
||||
* tests: test/old: Removed commented code (`pr#15366 <https://github.com/ceph/ceph/pull/15366>`_, Jos Collin)
|
||||
* tests: test/osdc: fix comparison error and silence warning from -Wunused-value (`pr#15353 <https://github.com/ceph/ceph/pull/15353>`_, Willem Jan Withagen)
|
||||
* tests: test: osd/TestOSDMap.cc: fix Clang complain about promotion (`pr#15525 <https://github.com/ceph/ceph/pull/15525>`_, Willem Jan Withagen)
|
||||
* tests: test: test_denc.cc: silence warning from -Wsign-compare (`pr#15355 <https://github.com/ceph/ceph/pull/15355>`_, Jos Collin)
|
||||
* tests: test: Test fix for SnapSet change (`pr#15161 <https://github.com/ceph/ceph/pull/15161>`_, David Zafman)
|
||||
* tests: test/unittest_bluefs: check whether mounted success (`pr#14988 <https://github.com/ceph/ceph/pull/14988>`_, shiqi)
|
||||
* tools: ceph.in: adjust usage width according to user's tty (`pr#15190 <https://github.com/ceph/ceph/pull/15190>`_, Kefu Chai)
|
||||
* tools: ceph.in: assert(state==connected) before help_for_target() (`pr#15156 <https://github.com/ceph/ceph/pull/15156>`_, Kefu Chai)
|
||||
* tools: ceph.in: drop the compatiiblity to handle non json commands (`pr#15508 <https://github.com/ceph/ceph/pull/15508>`_, Kefu Chai)
|
||||
* tools: ceph.in: print return code when json_command failed (`pr#15378 <https://github.com/ceph/ceph/pull/15378>`_, liuchang0812)
|
||||
* tools: tools/ceph_kvstore_tool: add "bluestore-kv" to usage (`pr#15326 <https://github.com/ceph/ceph/pull/15326>`_, xie xingguo)
|
||||
* tools: tools/crushtool: replicated-rule API support (`pr#15011 <https://github.com/ceph/ceph/pull/15011>`_, xie xingguo)
|
||||
* tools: vstart: "debug_ms=1" for mgr by default (`pr#15127 <https://github.com/ceph/ceph/pull/15127>`_, Kefu Chai)
|
||||
* tools: vstart: print "start osd.$id" instead of "start osd$id" (`pr#15427 <https://github.com/ceph/ceph/pull/15427>`_, Kefu Chai)
|
||||
|
||||
v12.0.3 Luminous (dev)
|
||||
======================
|
||||
|
||||
This is the fourth development checkpoint release of Luminous, the next long
|
||||
term stable release. This release introduces several improvements in bluestore,
|
||||
monitor, rbd & rgw.
|
||||
|
||||
Major changes from v12.0.2
|
||||
--------------------------
|
||||
|
||||
Notable Changes
|
||||
---------------
|
||||
* bluestore,core: osd/OSDMap: should update input param if osd dne (`pr#14863 <https://github.com/ceph/ceph/pull/14863>`_, Kefu Chai)
* bluestore: include/intarith: templatize ctz/clz/cbits helpers (`pr#14862 <https://github.com/ceph/ceph/pull/14862>`_, Kefu Chai)
* bluestore: os/bluestore: align reclaim size to bluefs_alloc_size (`pr#14744 <https://github.com/ceph/ceph/pull/14744>`_, Haomai Wang)
* bluestore: os/bluestore: assert blob map returns success (`pr#14473 <https://github.com/ceph/ceph/pull/14473>`_, shiqi)
* bluestore: os/bluestore: fix deferred write race (`issue#19880 <http://tracker.ceph.com/issues/19880>`_, `pr#15004 <https://github.com/ceph/ceph/pull/15004>`_, Sage Weil)
* bluestore: os/bluestore: fix typo(s/trasnaction/transaction/) (`pr#14890 <https://github.com/ceph/ceph/pull/14890>`_, xie xingguo)
* bluestore: os/bluestore: fix use after free race with aio_wait (`pr#14956 <https://github.com/ceph/ceph/pull/14956>`_, Sage Weil)
* bluestore: os/bluestore: pre-calculate number of ghost buffers to evict (`pr#15029 <https://github.com/ceph/ceph/pull/15029>`_, xie xingguo)
* bluestore: os/bluestore: Record l_bluestore_state_kv_queued_lat for sync_submit_… (`pr#14448 <https://github.com/ceph/ceph/pull/14448>`_, Jianpeng Ma)
* bluestore: os/bluestore: Remove ExtentFreeListManager. (`pr#14772 <https://github.com/ceph/ceph/pull/14772>`_, Jianpeng Ma)
* bluestore: os/bluestore: remove unused condition variable (`pr#14973 <https://github.com/ceph/ceph/pull/14973>`_, Igor Fedotov)
* bluestore: os/bluestore: rename/fix throttle options (`pr#14717 <https://github.com/ceph/ceph/pull/14717>`_, Sage Weil)
* bluestore: os/bluestore: roundoff bluefs allocs to bluefs_alloc_size (`pr#14876 <https://github.com/ceph/ceph/pull/14876>`_, Ramesh Chander)
* bluestore: os/bluestore: use correct bound encode size for unused (`pr#14731 <https://github.com/ceph/ceph/pull/14731>`_, Haomai Wang)
* bluestore,performance: common/config_opts.h: compaction readahead for bluestore/rocksdb (`pr#14932 <https://github.com/ceph/ceph/pull/14932>`_, Mark Nelson)
* bluestore,performance: os/bluestore/BlueFS: add bluefs_sync_write option (`pr#14510 <https://github.com/ceph/ceph/pull/14510>`_, Sage Weil)
* bluestore,performance: os/bluestore: do not balance bluefs on every kv_sync_thread iteration (`pr#14557 <https://github.com/ceph/ceph/pull/14557>`_, Sage Weil)
* bluestore,performance: os/bluestore: eliminate some excessive stuff (`pr#14675 <https://github.com/ceph/ceph/pull/14675>`_, Igor Fedotov)
* bluestore,performance: os/bluestore: get rid of excessive lock at BitMapAllocator (`pr#14749 <https://github.com/ceph/ceph/pull/14749>`_, Igor Fedotov)
* bluestore,performance: os/blueStore: In osd_tp_thread, call _txc_finalize_kv. (`pr#14709 <https://github.com/ceph/ceph/pull/14709>`_, Jianpeng Ma)
* bluestore,performance: os/bluestore: make bluestore_max_blob_size parameter hdd/ssd case dependent (`pr#14434 <https://github.com/ceph/ceph/pull/14434>`_, Igor Fedotov)
* bluestore,performance: os/bluestore: rewrite deferred write handling (`issue#16644 <http://tracker.ceph.com/issues/16644>`_, `pr#14491 <https://github.com/ceph/ceph/pull/14491>`_, Sage Weil)
* bluestore,performance: os/bluestore: use denc for varint encoding (`pr#14911 <https://github.com/ceph/ceph/pull/14911>`_, Piotr Dałek)
* bluestore,performance: os/fs/aio: use small_vector for aio_t; clean up header location (`pr#14853 <https://github.com/ceph/ceph/pull/14853>`_, Sage Weil)
* bluestore,tests: unittest_alloc: add test_alloc_big (`issue#16662 <http://tracker.ceph.com/issues/16662>`_, `pr#14844 <https://github.com/ceph/ceph/pull/14844>`_, Sage Weil)
* bluestore,tools: ceph-kvstore-tool: allow 'bluestore-kv' as kvdb type; add escaping, compaction (`pr#14718 <https://github.com/ceph/ceph/pull/14718>`_, Sage Weil)
* build/ops: alpine: add alpine linux dev support (`pr#9853 <https://github.com/ceph/ceph/pull/9853>`_, John Coyle)
* build/ops: arch: use __get_cpuid instead of do_cpuid (`issue#7869 <http://tracker.ceph.com/issues/7869>`_, `pr#14857 <https://github.com/ceph/ceph/pull/14857>`_, Jos Collin)
* build/ops: CMakeLists.txt: don't do crypto/isa-l if not Intel (`pr#14721 <https://github.com/ceph/ceph/pull/14721>`_, Dan Mick)
* build/ops: compressor/zlib: fix plugin for non-Intel arches (`pr#14947 <https://github.com/ceph/ceph/pull/14947>`_, Dan Mick)
* build/ops: debian/rpm: move radosgw-admin to ceph-common (`issue#19577 <http://tracker.ceph.com/issues/19577>`_, `pr#14940 <https://github.com/ceph/ceph/pull/14940>`_, Ali Maredia)
* build/ops: dmclock: error: ‘function’ in namespace ‘std’ does not name a template type (`pr#14909 <https://github.com/ceph/ceph/pull/14909>`_, Jos Collin)
* build/ops: dmclock: initial commit of dmclock QoS library (`pr#14330 <https://github.com/ceph/ceph/pull/14330>`_, J. Eric Ivancich)
* build/ops: init-ceph: add ceph libraries path to environment (`pr#14693 <https://github.com/ceph/ceph/pull/14693>`_, Mohamad Gebai)
* build/ops: init-ceph: should have a space before "]" (`pr#14796 <https://github.com/ceph/ceph/pull/14796>`_, Kefu Chai)
* build/ops: merge v12.0.2 release tag (`pr#15091 <https://github.com/ceph/ceph/pull/15091>`_, Jenkins Build Slave User)
* build/ops,mgr: debian/ceph-base.dirs: create bootstrap-mgr dirs (`pr#14838 <https://github.com/ceph/ceph/pull/14838>`_, Sage Weil)
* build/ops,mon: mon/ConfigKeyService: add 'config-key dump' to show keys and vals (`pr#14858 <https://github.com/ceph/ceph/pull/14858>`_, Dan Mick)
* build/ops,performance,rbd: byteorder: use gcc intrinsics for byteswap (`pr#15012 <https://github.com/ceph/ceph/pull/15012>`_, Kefu Chai)
* build/ops: rocksdb: sync with upstream (`pr#14818 <https://github.com/ceph/ceph/pull/14818>`_, Nathan Cutler, Kefu Chai)
* build/ops: rpm: fix python-Sphinx package name for SUSE (`pr#15015 <https://github.com/ceph/ceph/pull/15015>`_, Nathan Cutler, Jan Matejek)
* build/ops: rpm: gperftools-devel >= 2.4 (`issue#13522 <http://tracker.ceph.com/issues/13522>`_, `pr#14870 <https://github.com/ceph/ceph/pull/14870>`_, Nathan Cutler)
* build/ops: rpm: package crypto on x86_64 only (`pr#14779 <https://github.com/ceph/ceph/pull/14779>`_, Nathan Cutler)
* build/ops: debian: package crypto plugin only on amd64 (`pr#14820 <https://github.com/ceph/ceph/pull/14820>`_, Kefu Chai)
* build/ops: src/init-ceph.in: allow one((re)?start|stop) as commands (`pr#14560 <https://github.com/ceph/ceph/pull/14560>`_, Willem Jan Withagen)
* build/ops: yasm-wrapper: strip -E (stops ccache trashing source files) (`pr#14633 <https://github.com/ceph/ceph/pull/14633>`_, Tim Serong)
* cephfs: ceph-fuse: use user space permission check by default (`issue#19820 <http://tracker.ceph.com/issues/19820>`_, `pr#14907 <https://github.com/ceph/ceph/pull/14907>`_, "Yan, Zheng")
* cephfs: client: client_quota no longer optional (`pr#14978 <https://github.com/ceph/ceph/pull/14978>`_, Dan van der Ster)
* cephfs: client: fix UserPerm::gid_in_group() (`issue#19903 <http://tracker.ceph.com/issues/19903>`_, `pr#15039 <https://github.com/ceph/ceph/pull/15039>`_, "Yan, Zheng")
* cephfs: client: getattr before returning quota/layout xattrs (`issue#17939 <http://tracker.ceph.com/issues/17939>`_, `pr#14018 <https://github.com/ceph/ceph/pull/14018>`_, John Spray)
* cephfs: fs/ceph-fuse: normalize file open flags on the wire (`pr#14822 <https://github.com/ceph/ceph/pull/14822>`_, Jan Fajerski)
* cephfs: mds/Server.cc: Don't evict a slow client if... (`issue#17855 <http://tracker.ceph.com/issues/17855>`_, `pr#12935 <https://github.com/ceph/ceph/pull/12935>`_, Michal Jarzabek)
* cephfs: osdc/Filer: truncate large file part by part (`issue#19755 <http://tracker.ceph.com/issues/19755>`_, `pr#14769 <https://github.com/ceph/ceph/pull/14769>`_, "Yan, Zheng")
* cephfs: osdc: remove journaler_allow_split_entries option (`issue#19691 <http://tracker.ceph.com/issues/19691>`_, `pr#14636 <https://github.com/ceph/ceph/pull/14636>`_, John Spray)
* cephfs,performance: client: make seeky readdir more efficient (`issue#19306 <http://tracker.ceph.com/issues/19306>`_, `pr#14317 <https://github.com/ceph/ceph/pull/14317>`_, "Yan, Zheng")
* cephfs: qa/cephfs: Fix for test_data_scan (`issue#19893 <http://tracker.ceph.com/issues/19893>`_, `pr#15094 <https://github.com/ceph/ceph/pull/15094>`_, Douglas Fuller)
* cephfs: qa/suites/fs: reserve more space for mds in full tests (`issue#19891 <http://tracker.ceph.com/issues/19891>`_, `pr#15026 <https://github.com/ceph/ceph/pull/15026>`_, "Yan, Zheng")
* cephfs,tests: qa: silence spurious insufficient standby health warnings (`pr#15035 <https://github.com/ceph/ceph/pull/15035>`_, Patrick Donnelly)
* cephfs,tests: qa: Tidy up fs/ suite (`pr#14575 <https://github.com/ceph/ceph/pull/14575>`_, John Spray)
* cleanup: dmclock: include missing <functional> header. (`pr#14923 <https://github.com/ceph/ceph/pull/14923>`_, Jos Collin)
* cleanup: kill clang warnings (`pr#14549 <https://github.com/ceph/ceph/pull/14549>`_, Kefu Chai)
* cleanup: test: c_write_operations.cc: silence warning from -Wsign-compare (`pr#14889 <https://github.com/ceph/ceph/pull/14889>`_, Jos Collin)
* cmake: add "container" to required boost components (`pr#14850 <https://github.com/ceph/ceph/pull/14850>`_, Kefu Chai)
* cmake: align cmake names of library packages (`issue#19853 <http://tracker.ceph.com/issues/19853>`_, `pr#14951 <https://github.com/ceph/ceph/pull/14951>`_, Nathan Cutler)
* cmake: Allow tests to build without NSS (`pr#13315 <https://github.com/ceph/ceph/pull/13315>`_, Daniel Gryniewicz)
* cmake: do not compile crush twice (`pr#14725 <https://github.com/ceph/ceph/pull/14725>`_, Kefu Chai)
* cmake: enable cross-compilation of boost (`issue#18938 <http://tracker.ceph.com/issues/18938>`_, `pr#14881 <https://github.com/ceph/ceph/pull/14881>`_, Kefu Chai)
* cmake: fix the linked lib reference of unittest_rgw_crypto (`pr#14869 <https://github.com/ceph/ceph/pull/14869>`_, Willem Jan Withagen)
* cmake: kill duplicated cmake commands (`pr#14948 <https://github.com/ceph/ceph/pull/14948>`_, liuchang0812)
* cmake: pass -d0 to b2 if not CMAKE_VERBOSE_MAKEFILE (`pr#14651 <https://github.com/ceph/ceph/pull/14651>`_, Kefu Chai)
* cmake: should not compile crc32c_ppc.c on intel arch. (`pr#14423 <https://github.com/ceph/ceph/pull/14423>`_, Kefu Chai)
* common: Better handling for missing/inaccessible ceph.conf files (`issue#19658 <http://tracker.ceph.com/issues/19658>`_, `pr#14757 <https://github.com/ceph/ceph/pull/14757>`_, Dan Mick)
* common: cls/log/cls_log.cc: reduce logging noise (`issue#19835 <http://tracker.ceph.com/issues/19835>`_, `pr#14879 <https://github.com/ceph/ceph/pull/14879>`_, Willem Jan Withagen)
* common: common/admin_socket: add config for admin socket permission bits (`pr#11684 <https://github.com/ceph/ceph/pull/11684>`_, runsisi)
* common: common/ceph_context.cc: Use CEPH_DEV to reduce logfile noise (`pr#10384 <https://github.com/ceph/ceph/pull/10384>`_, Willem Jan Withagen)
* common: common/config: Add /usr/local/etc/ceph to default paths (`pr#14797 <https://github.com/ceph/ceph/pull/14797>`_, Willem Jan Withagen)
* common: common/config_opts: Set the HDD throttle cost to 1.5M (`pr#14808 <https://github.com/ceph/ceph/pull/14808>`_, Mark Nelson)
* common: common/Finisher: fix uninitialized variable warning (`pr#14958 <https://github.com/ceph/ceph/pull/14958>`_, Piotr Dałek)
* common: common/interval_set: return int64_t for size() (`pr#12898 <https://github.com/ceph/ceph/pull/12898>`_, Xinze Chi)
* common: crypto: cleanup NSPR in main thread (`pr#14801 <https://github.com/ceph/ceph/pull/14801>`_, Kefu Chai)
* common: fix building against libcryptopp (`pr#14949 <https://github.com/ceph/ceph/pull/14949>`_, Shengjing Zhu)
* common: Fix unused variable references warnings (`pr#14790 <https://github.com/ceph/ceph/pull/14790>`_, Willem Jan Withagen)
* common: msg/async: return right away in NetHandler::set_priority() if not supported (`pr#14795 <https://github.com/ceph/ceph/pull/14795>`_, Kefu Chai)
* common: osdc/Objecter: fix pool dne corner case (`issue#19552 <http://tracker.ceph.com/issues/19552>`_, `pr#14901 <https://github.com/ceph/ceph/pull/14901>`_, Sage Weil)
* common: osdc/Objecter: handle command target that goes down (`issue#19452 <http://tracker.ceph.com/issues/19452>`_, `pr#14302 <https://github.com/ceph/ceph/pull/14302>`_, Sage Weil)
* common: osd/OSDMap: fix feature commit comment (`pr#15056 <https://github.com/ceph/ceph/pull/15056>`_, Sage Weil)
* common,performance: common/Finisher: batch handle perfcounter && only send signal when waiters exist. (`pr#14363 <https://github.com/ceph/ceph/pull/14363>`_, Jianpeng Ma)
* common: Remove redundant includes (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15003 <https://github.com/ceph/ceph/pull/15003>`_, Brad Hubbard)
* common: Remove redundant includes (`issue#19883 <http://tracker.ceph.com/issues/19883>`_, `pr#15019 <https://github.com/ceph/ceph/pull/15019>`_, Brad Hubbard)
* common: src/common: change last_work_queue to next_work_queue. (`pr#14738 <https://github.com/ceph/ceph/pull/14738>`_, Pan Liu)
* core: ceph-disk: ceph-disk on FreeBSD should not use mpath-code (`pr#14837 <https://github.com/ceph/ceph/pull/14837>`_, Willem Jan Withagen)
* core: ceph-disk: implement prepare --no-locking (`pr#14728 <https://github.com/ceph/ceph/pull/14728>`_, Dan van der Ster, Loic Dachary)
* core: ceph-disk: separate ceph-osd --check-needs-* logs (`issue#19888 <http://tracker.ceph.com/issues/19888>`_, `pr#15016 <https://github.com/ceph/ceph/pull/15016>`_, Loic Dachary)
* core: erasure-code: sync jerasure/gf-complete submodules (`pr#14424 <https://github.com/ceph/ceph/pull/14424>`_, Loic Dachary)
* core: introduce DirectMessenger (`pr#14755 <https://github.com/ceph/ceph/pull/14755>`_, Casey Bodley, Matt Benjamin)
* core: messages: remove compat cruft (`pr#14475 <https://github.com/ceph/ceph/pull/14475>`_, Sage Weil)
* core,mon: common/Finisher: fix uninitialized variable warning (`issue#19874 <http://tracker.ceph.com/issues/19874>`_, `pr#14979 <https://github.com/ceph/ceph/pull/14979>`_, Sage Weil)
* core: mon,osd: add require_min_compat_client setting to enforce and clarify client compatibility (`pr#14959 <https://github.com/ceph/ceph/pull/14959>`_, Sage Weil)
* core: mon/OSDMonitor: skip prime_pg_temp if mapping is prior to osdmap (`pr#14826 <https://github.com/ceph/ceph/pull/14826>`_, Kefu Chai)
* core: mon/PGMonitor: fix wrongly reporting "pg stuck in inactive" (`pr#14391 <https://github.com/ceph/ceph/pull/14391>`_, Mingxin Liu)
* core: osd,librados: cmpext support (`pr#14715 <https://github.com/ceph/ceph/pull/14715>`_, Zhengyong Wang, David Disseldorp, Mike Christie)
* core: osd/OSDMap: bump encoding version for require_min_compat_client (`pr#15046 <https://github.com/ceph/ceph/pull/15046>`_, "Yan, Zheng")
* core: osd/PG.cc: Optimistic estimation on PG.last_active (`pr#14799 <https://github.com/ceph/ceph/pull/14799>`_, Xiaoxi Chen)
* core: osd/PG.cc: unify the call of checking whether lock is held (`pr#15013 <https://github.com/ceph/ceph/pull/15013>`_, Jin Cai)
* core: osd/PG: fix possible overflow on unfound objects (`pr#12669 <https://github.com/ceph/ceph/pull/12669>`_, huangjun)
* core: osd/PrimaryLogPG: do not call on_shutdown() if (pg.deleting) (`issue#19902 <http://tracker.ceph.com/issues/19902>`_, `pr#15040 <https://github.com/ceph/ceph/pull/15040>`_, Kefu Chai)
* core: osd/PrimaryLogPG: update modified range to include the whole object size for write_full op (`pr#15021 <https://github.com/ceph/ceph/pull/15021>`_, runsisi)
* core: osd/ReplicatedBackend: remove MOSDSubOp cruft from repop_applied (`pr#14358 <https://github.com/ceph/ceph/pull/14358>`_, Jianpeng Ma)
* core: os/filestore/FileJournal: Fix typo in the comment (`pr#14493 <https://github.com/ceph/ceph/pull/14493>`_, Zhou Zhengping)
* core: os/filestore: fix infinite loops in fiemap() (`pr#14367 <https://github.com/ceph/ceph/pull/14367>`_, Ning Yao)
* core,performance: osd, os: reduce fiemap burden (`pr#14640 <https://github.com/ceph/ceph/pull/14640>`_, Piotr Dałek)
* core,performance: os/filestore: use new sleep strategy when io_submit gets EAGAIN. (`pr#14860 <https://github.com/ceph/ceph/pull/14860>`_, Pan Liu)
* core,performance: os/kstore: Added rocksdb bloom filter settings (`pr#13053 <https://github.com/ceph/ceph/pull/13053>`_, Ted-Chang)
* core,tests: ceph_test_rados_api_watch_notify: make LibRadosWatchNotify.Watch3Timeout tolerate thrashing (`issue#19433 <http://tracker.ceph.com/issues/19433>`_, `pr#14899 <https://github.com/ceph/ceph/pull/14899>`_, Sage Weil)
* core,tools: ceph: perfcounter priorities and daemonperf updates to use them (`pr#14793 <https://github.com/ceph/ceph/pull/14793>`_, Sage Weil, Dan Mick)
* core,tools: kv: move 'bluestore-kv' hackery out of KeyValueDB into ceph-kvstore-tool (`issue#19778 <http://tracker.ceph.com/issues/19778>`_, `pr#14895 <https://github.com/ceph/ceph/pull/14895>`_, Sage Weil)
* crush: builder: legacy has chooseleaf_stable = 0 (`pr#14695 <https://github.com/ceph/ceph/pull/14695>`_, Loic Dachary)
* crush: crush_init_workspace starts with struct crush_work (`pr#14696 <https://github.com/ceph/ceph/pull/14696>`_, Loic Dachary)
* crush: update documentation for negative choose step (`pr#14970 <https://github.com/ceph/ceph/pull/14970>`_, Loic Dachary)
* doc: AUTHORS: update tech leads (`pr#14350 <https://github.com/ceph/ceph/pull/14350>`_, Patrick Donnelly)
* doc: correct and improve add user capability section (`pr#14055 <https://github.com/ceph/ceph/pull/14055>`_, Chu, Hua-Rong)
* doc: Correcting the remove bucket example and adding bucket link/unlink examples (`pr#12460 <https://github.com/ceph/ceph/pull/12460>`_, Uday Mullangi)
* doc: dev add a note about ccache (`pr#14478 <https://github.com/ceph/ceph/pull/14478>`_, Abhishek Lekshmanan)
* doc: doc/dev: add some info about FreeBSD (`pr#14503 <https://github.com/ceph/ceph/pull/14503>`_, Willem Jan Withagen)
* doc: fio: update README.md so only the fio ceph engine is built (`pr#15081 <https://github.com/ceph/ceph/pull/15081>`_, Kefu Chai)
* doc: fix link that pointed to a nonexistent file (`pr#14740 <https://github.com/ceph/ceph/pull/14740>`_, Peter Maloney)
* doc: Indicate how to add multiple admin capabilities (`pr#13956 <https://github.com/ceph/ceph/pull/13956>`_, Chu, Hua-Rong)
* doc: mailmap: add Alibaba into organization map (`pr#14900 <https://github.com/ceph/ceph/pull/14900>`_, James Liu)
* doc: mailmap: update organization info (`pr#14747 <https://github.com/ceph/ceph/pull/14747>`_, liuchang0812)
* doc: mailmap: V12.0.1 credits (`pr#14479 <https://github.com/ceph/ceph/pull/14479>`_, M Ranga Swami Reddy)
* doc: mailmap: Weibing Zhang mailmap affiliation (`pr#15076 <https://github.com/ceph/ceph/pull/15076>`_, Weibing Zhang)
* doc: mailmap: ztczll affiliation (`pr#15079 <https://github.com/ceph/ceph/pull/15079>`_, zhanglei)
* doc: mention teuthology-worker security group (`pr#14748 <https://github.com/ceph/ceph/pull/14748>`_, Nathan Cutler)
* doc: peoplemap: add pdonnell alias (`pr#14352 <https://github.com/ceph/ceph/pull/14352>`_, Patrick Donnelly)
* doc: remove deprecated subcommand in man/8/ceph.rst (`pr#14928 <https://github.com/ceph/ceph/pull/14928>`_, Drunkard Zhang)
* doc: Re-word the warnings about using git subtrees. (`pr#14999 <https://github.com/ceph/ceph/pull/14999>`_, J. Eric Ivancich)
* doc: rgw: Clean up create subuser parameters (`pr#14335 <https://github.com/ceph/ceph/pull/14335>`_, hrchu)
* doc: rgw: correct get usage parameter default value (`pr#14372 <https://github.com/ceph/ceph/pull/14372>`_, hrchu)
* doc: rgw: Get user usage needs to specify user (`pr#14804 <https://github.com/ceph/ceph/pull/14804>`_, hrchu)
* doc: rgw: note rgw_enable_usage_log option in adminops guide (`pr#14803 <https://github.com/ceph/ceph/pull/14803>`_, hrchu)
* doc: rgw: Rewrite Java swift examples (`pr#14268 <https://github.com/ceph/ceph/pull/14268>`_, Chu, Hua-Rong)
* doc: rgw: Rewrite the key management (`pr#14384 <https://github.com/ceph/ceph/pull/14384>`_, hrchu)
* doc: style fix for doc/cephfs/client-config-ref.rst (`pr#14840 <https://github.com/ceph/ceph/pull/14840>`_, Drunkard Zhang)
* doc: two minor fixes (`pr#14494 <https://github.com/ceph/ceph/pull/14494>`_, Drunkard Zhang)
* doc: update the support status of swift static website (`pr#13824 <https://github.com/ceph/ceph/pull/13824>`_, Jing Wenjun)
* doc: v12.0.2 (dev) release notes (`pr#14625 <https://github.com/ceph/ceph/pull/14625>`_, Abhishek Lekshmanan)
* librados: fix rados_pool_list when buf is null (see the sketch after this list) (`pr#14859 <https://github.com/ceph/ceph/pull/14859>`_, Sage Weil)
* librbd: cleanup logging code under librbd/io (`pr#14975 <https://github.com/ceph/ceph/pull/14975>`_, runsisi)
* librbd: do not instantiate templates while building tests (`issue#18938 <http://tracker.ceph.com/issues/18938>`_, `pr#14891 <https://github.com/ceph/ceph/pull/14891>`_, Kefu Chai)
* librbd: minor fixes for image trash move (`pr#14834 <https://github.com/ceph/ceph/pull/14834>`_, runsisi)
* librbd: remove redundant check for image id emptiness (`pr#14830 <https://github.com/ceph/ceph/pull/14830>`_, runsisi)
* librbd: silence -Wunused-variable warning (`pr#14953 <https://github.com/ceph/ceph/pull/14953>`_, Kefu Chai)
* mds: add perf counters for file system operations (`pr#14938 <https://github.com/ceph/ceph/pull/14938>`_, Michael Sevilla)
* mds: change_attr++ and set ctime for set_vxattr (`issue#19583 <http://tracker.ceph.com/issues/19583>`_, `pr#14726 <https://github.com/ceph/ceph/pull/14726>`_, Patrick Donnelly)
* mds: fix mantle script to not fail for last rank (`issue#19589 <http://tracker.ceph.com/issues/19589>`_, `pr#14704 <https://github.com/ceph/ceph/pull/14704>`_, Patrick Donnelly)
* mds: fix use-after-free in Locker::file_update_finish() (`issue#19828 <http://tracker.ceph.com/issues/19828>`_, `pr#14991 <https://github.com/ceph/ceph/pull/14991>`_, "Yan, Zheng")
* mds: issue new caps when sending reply to client (`issue#19635 <http://tracker.ceph.com/issues/19635>`_, `pr#14743 <https://github.com/ceph/ceph/pull/14743>`_, "Yan, Zheng")
* mds: relocate PTRWAITER put near get (`pr#14921 <https://github.com/ceph/ceph/pull/14921>`_, Patrick Donnelly)
* mds: remove boost::pool usage and use tcmalloc directly (`issue#18425 <http://tracker.ceph.com/issues/18425>`_, `pr#12792 <https://github.com/ceph/ceph/pull/12792>`_, Zhi Zhang)
* mds: remove "mds log" config option (`issue#18816 <http://tracker.ceph.com/issues/18816>`_, `pr#14652 <https://github.com/ceph/ceph/pull/14652>`_, John Spray)
* mds: support export pinning on directories (`issue#17834 <http://tracker.ceph.com/issues/17834>`_, `pr#14598 <https://github.com/ceph/ceph/pull/14598>`_, "Yan, Zheng", Patrick Donnelly)
* mds: use debug_mds for most subsys (`issue#19734 <http://tracker.ceph.com/issues/19734>`_, `pr#15052 <https://github.com/ceph/ceph/pull/15052>`_, Patrick Donnelly)
* mgr: add machinery for python modules to send MCommands to daemons (`pr#14920 <https://github.com/ceph/ceph/pull/14920>`_, John Spray)
* mgr: add mgr allow * to client.admin (`pr#14864 <https://github.com/ceph/ceph/pull/14864>`_, huanwen ren)
* mgr: do shutdown using finisher so we can do it in the right order (`issue#19743 <http://tracker.ceph.com/issues/19743>`_, `pr#14835 <https://github.com/ceph/ceph/pull/14835>`_, Kefu Chai)
* mgr: do the shutdown in the right order (`issue#19813 <http://tracker.ceph.com/issues/19813>`_, `pr#14952 <https://github.com/ceph/ceph/pull/14952>`_, Kefu Chai)
* mgr: fix crash on set_config from python module with insufficient caps (`issue#19629 <http://tracker.ceph.com/issues/19629>`_, `pr#14706 <https://github.com/ceph/ceph/pull/14706>`_, Tim Serong)
* mgr: fix metadata handling from old MDS daemons (`pr#14161 <https://github.com/ceph/ceph/pull/14161>`_, John Spray)
* mgr: fix python module teardown & add tests (`issue#19407 <http://tracker.ceph.com/issues/19407>`_, `issue#19412 <http://tracker.ceph.com/issues/19412>`_, `issue#19258 <http://tracker.ceph.com/issues/19258>`_, `pr#14232 <https://github.com/ceph/ceph/pull/14232>`_, John Spray)
* mgr: fix session leak (`issue#19591 <http://tracker.ceph.com/issues/19591>`_, `pr#14720 <https://github.com/ceph/ceph/pull/14720>`_, Sage Weil)
* mgr: Misc. bug fixes (`issue#18994 <http://tracker.ceph.com/issues/18994>`_, `pr#14883 <https://github.com/ceph/ceph/pull/14883>`_, John Spray)
* mgr: mkdir bootstrap-mgr (`pr#14824 <https://github.com/ceph/ceph/pull/14824>`_, huanwen ren)
* mgr,mon: mon/MgrMonitor: only propose if we updated (`pr#14645 <https://github.com/ceph/ceph/pull/14645>`_, Sage Weil)
* mgr,mon: mon,mgr: trim osdmap without the help of pgmap (`pr#14504 <https://github.com/ceph/ceph/pull/14504>`_, Kefu Chai)
* mgr: pybind/mgr/rest: completely terminate cherrypy in shutdown (`pr#14995 <https://github.com/ceph/ceph/pull/14995>`_, Tim Serong)
* mgr: redirect python stdout,stderr to ceph log (`pr#14189 <https://github.com/ceph/ceph/pull/14189>`_, Kefu Chai, Tim Serong, Dan Mick)
* mgr: release allocated PyString (`pr#14716 <https://github.com/ceph/ceph/pull/14716>`_, Kefu Chai)
* mgr: remove non-existent MDS daemons from FSMap (`issue#17453 <http://tracker.ceph.com/issues/17453>`_, `pr#14937 <https://github.com/ceph/ceph/pull/14937>`_, Spandan Kumar Sahu)
* mgr,tests: qa/suites: move mgr tests into rados suite (`pr#14687 <https://github.com/ceph/ceph/pull/14687>`_, John Spray)
* mgr: various cleanups (`pr#14802 <https://github.com/ceph/ceph/pull/14802>`_, Kefu Chai)
* mon: check is_shutdown() in timer callbacks (`issue#19825 <http://tracker.ceph.com/issues/19825>`_, `pr#14919 <https://github.com/ceph/ceph/pull/14919>`_, Kefu Chai)
* mon: do not prime_pg_temp creating pgs; clean up pg create conditions (`issue#19826 <http://tracker.ceph.com/issues/19826>`_, `pr#14913 <https://github.com/ceph/ceph/pull/14913>`_, Sage Weil)
* mon: don't call propose_pending in prepare_update() (`issue#19738 <http://tracker.ceph.com/issues/19738>`_, `pr#14711 <https://github.com/ceph/ceph/pull/14711>`_, John Spray)
* mon: logclient: use the seq id of the 1st log entry when resetting session (`issue#19427 <http://tracker.ceph.com/issues/19427>`_, `pr#14927 <https://github.com/ceph/ceph/pull/14927>`_, Kefu Chai)
* mon: mon/OSDMonitor: check get()'s return value instead of bl's length (`pr#14805 <https://github.com/ceph/ceph/pull/14805>`_, Kefu Chai)
* mon: mon/OSDMonitor: cleanup pending_created_pgs after done with it (`pr#14898 <https://github.com/ceph/ceph/pull/14898>`_, Kefu Chai)
* mon: mon/OSDMonitor: do not alter the "created" epoch of a pg (`issue#19787 <http://tracker.ceph.com/issues/19787>`_, `pr#14849 <https://github.com/ceph/ceph/pull/14849>`_, Kefu Chai)
* mon: mon/OSDMonitor: fix output func name in can_mark_out (`pr#14758 <https://github.com/ceph/ceph/pull/14758>`_, xie xingguo)
* mon: mon/OSDMonitor: increase last_epoch_clean's lower bound if possible (`pr#14855 <https://github.com/ceph/ceph/pull/14855>`_, Kefu Chai)
* mon: mon/OSDMonitor: tolerate upgrade from post-kraken dev cluster (`pr#14442 <https://github.com/ceph/ceph/pull/14442>`_, Sage Weil)
* mon: mon/OSDMonitor: update creating_pgs using pending_creatings (`issue#19814 <http://tracker.ceph.com/issues/19814>`_, `pr#14897 <https://github.com/ceph/ceph/pull/14897>`_, Kefu Chai)
* mon: mon/OSDMonitor: update pg_creatings even if the new acting set is empty (`issue#19744 <http://tracker.ceph.com/issues/19744>`_, `pr#14730 <https://github.com/ceph/ceph/pull/14730>`_, Kefu Chai)
* mon: mon/PGMap: add up_primary pg number field for pg-dump cmd (`pr#13451 <https://github.com/ceph/ceph/pull/13451>`_, xie xingguo)
* mon: mon/PGMap.cc: fix "osd_epochs" section of dump_basic (`pr#14996 <https://github.com/ceph/ceph/pull/14996>`_, xie xingguo)
* mon: mon/PGMonitor: clean up min/max span warning (`pr#14611 <https://github.com/ceph/ceph/pull/14611>`_, Sage Weil)
* mon: move 'pg map' to OSDMonitor (`pr#14559 <https://github.com/ceph/ceph/pull/14559>`_, Sage Weil)
* mon: osd/PGMonitor: always update pgmap with latest osdmap (`issue#19398 <http://tracker.ceph.com/issues/19398>`_, `pr#14777 <https://github.com/ceph/ceph/pull/14777>`_, Kefu Chai)
* mon: show inactive % in ceph status (`pr#14810 <https://github.com/ceph/ceph/pull/14810>`_, Sage Weil)
* msg: Increase loglevels on some messages (`pr#14707 <https://github.com/ceph/ceph/pull/14707>`_, Willem Jan Withagen)
* msg: msg/async/net_handler: errno should be stored before calling next function (`pr#14985 <https://github.com/ceph/ceph/pull/14985>`_, Zhou Zhengping)
* msg: msg/simple: use my addr when setting sock priority (`issue#19801 <http://tracker.ceph.com/issues/19801>`_, `pr#14878 <https://github.com/ceph/ceph/pull/14878>`_, Kefu Chai)
* msg: src/msg/async/AsyncConnect.cc: Use of sizeof() on a Pointer Type (`pr#14773 <https://github.com/ceph/ceph/pull/14773>`_, Svyatoslav)
* msg: src/msg/simple/Pipe.cc: Fix the inclusion of '}' (`pr#14843 <https://github.com/ceph/ceph/pull/14843>`_, Willem Jan Withagen)
* osd: check fsid is normal before osd mkfs (`pr#13898 <https://github.com/ceph/ceph/pull/13898>`_, song baisen)
* osd: clean unused work queue (`pr#14990 <https://github.com/ceph/ceph/pull/14990>`_, Wei Jin)
* osd: eliminate snapdir objects and move clone snaps vector into SnapSet (`pr#13610 <https://github.com/ceph/ceph/pull/13610>`_, Sage Weil)
* osd: fix occasional MOSDMap leak (`issue#18293 <http://tracker.ceph.com/issues/18293>`_, `pr#14558 <https://github.com/ceph/ceph/pull/14558>`_, Sage Weil)
* osd: fix typo in comment (`pr#13061 <https://github.com/ceph/ceph/pull/13061>`_, Gu Zhongyan)
* osd: Implement asynchronous scrub sleep (`issue#19497 <http://tracker.ceph.com/issues/19497>`_, `pr#14886 <https://github.com/ceph/ceph/pull/14886>`_, Brad Hubbard)
* osd: Implement peering state timing (`pr#14627 <https://github.com/ceph/ceph/pull/14627>`_, Brad Hubbard)
* osd: objclass sdk (`pr#14723 <https://github.com/ceph/ceph/pull/14723>`_, Neha Ojha)
* osd: osdc/Objecter: more constness (`pr#14819 <https://github.com/ceph/ceph/pull/14819>`_, Kefu Chai)
* osd: osdc: silence warning from `-Wsign-compare` (`pr#14729 <https://github.com/ceph/ceph/pull/14729>`_, Jos Collin)
* osd: pglog trimming fixes (`pr#12882 <https://github.com/ceph/ceph/pull/12882>`_, Zhiqiang Wang)
* osd: pglog: with config, don't assert in the presence of stale diverg… (`issue#17916 <http://tracker.ceph.com/issues/17916>`_, `pr#14648 <https://github.com/ceph/ceph/pull/14648>`_, Greg Farnum)
* osd: put osdmap in mempool (`pr#14780 <https://github.com/ceph/ceph/pull/14780>`_, Sage Weil)
* osd: renamed the new vector name in OSDMap::build_simple_crush_map_from_conf (`pr#14583 <https://github.com/ceph/ceph/pull/14583>`_, Jos Collin)
* osd: ReplicatedBackend::prep_push() remove redundant variable assignments (`pr#14817 <https://github.com/ceph/ceph/pull/14817>`_, Jin Cai)
* osd: sched_scrub() lock pg only if all scrubbing conditions are fulfilled (`pr#14968 <https://github.com/ceph/ceph/pull/14968>`_, Jin Cai)
* osd: simplify past_intervals representation (`pr#14444 <https://github.com/ceph/ceph/pull/14444>`_, Samuel Just, Sage Weil)
* osd: stop mgrc earlier in shutdown() (`issue#19638 <http://tracker.ceph.com/issues/19638>`_, `pr#14904 <https://github.com/ceph/ceph/pull/14904>`_, Kefu Chai)
* osd: stop MgrClient callbacks on shutdown (`issue#19638 <http://tracker.ceph.com/issues/19638>`_, `pr#14896 <https://github.com/ceph/ceph/pull/14896>`_, Sage Weil)
* osd: strip pglog op name (`pr#14764 <https://github.com/ceph/ceph/pull/14764>`_, liuchang0812)
* osd: support dumping long ops (`pr#13019 <https://github.com/ceph/ceph/pull/13019>`_, Zhiqiang Wang)
* osd: switch filestore to default to rocksdb (`pr#14814 <https://github.com/ceph/ceph/pull/14814>`_, Neha Ojha)
* osd: tag fast dispatch messages with min_epoch (`pr#13681 <https://github.com/ceph/ceph/pull/13681>`_, Sage Weil)
* osd: use append(bufferlist &) to avoid unnecessary copy (`pr#12272 <https://github.com/ceph/ceph/pull/12272>`_, Yunchuan Wen)
* osd: zipkin tracing (`pr#14305 <https://github.com/ceph/ceph/pull/14305>`_, Sage Weil, Marios-Evaggelos Kogias, Victor Araujo, Casey Bodley, Andrew Shewmaker, Chendi.Xue)
* performance: buffer, osd: add missing crc cache miss perf counter (`pr#14957 <https://github.com/ceph/ceph/pull/14957>`_, Piotr Dałek)
* performance: osd/PG.cc: loop invariant code motion (`pr#12720 <https://github.com/ceph/ceph/pull/12720>`_, Li Wang)
* pybind: better error msg (`pr#14497 <https://github.com/ceph/ceph/pull/14497>`_, Kefu Chai)
* pybind: fix open flags calculation (`issue#19890 <http://tracker.ceph.com/issues/19890>`_, `pr#15018 <https://github.com/ceph/ceph/pull/15018>`_, "Yan, Zheng")
* qa: qa/added overrides (`pr#14917 <https://github.com/ceph/ceph/pull/14917>`_, Yuri Weinstein)
* qa: qa/suite: replace reference to fs/xfs.yaml (`pr#14756 <https://github.com/ceph/ceph/pull/14756>`_, Yehuda Sadeh)
* qa: qa/suites/rados/singleton-bluestore: concat settings (`pr#14884 <https://github.com/ceph/ceph/pull/14884>`_, Kefu Chai)
* rbd: cls_rbd: default initialize snapshot namespace for legacy clients (`issue#19413 <http://tracker.ceph.com/issues/19413>`_, `pr#14903 <https://github.com/ceph/ceph/pull/14903>`_, Jason Dillaman)
* rbd: common/bit_vector: utilize deep-copy during data decode (`issue#19863 <http://tracker.ceph.com/issues/19863>`_, `pr#15017 <https://github.com/ceph/ceph/pull/15017>`_, Jason Dillaman)
* rbd: import needs to sanity check auto-generated image name (`issue#19128 <http://tracker.ceph.com/issues/19128>`_, `pr#14754 <https://github.com/ceph/ceph/pull/14754>`_, Mykola Golub)
* rbd: info command should indicate if parent is in trash (`pr#14875 <https://github.com/ceph/ceph/pull/14875>`_, Jason Dillaman)
* rbd-mirror: ensure missing images are re-synced when detected (`issue#19811 <http://tracker.ceph.com/issues/19811>`_, `pr#14945 <https://github.com/ceph/ceph/pull/14945>`_, Jason Dillaman)
* rbd-mirror: failover and failback of unmodified image results in split-brain (`issue#19858 <http://tracker.ceph.com/issues/19858>`_, `pr#14963 <https://github.com/ceph/ceph/pull/14963>`_, Jason Dillaman)
* rbd-mirror: image deletions should be handled by assigned instance (`pr#14832 <https://github.com/ceph/ceph/pull/14832>`_, Jason Dillaman)
* rbd-mirror: remove tracking of image names from pool watcher (`pr#14712 <https://github.com/ceph/ceph/pull/14712>`_, Jason Dillaman)
* rbd-mirror: resolve admin socket path names collision (`issue#19907 <http://tracker.ceph.com/issues/19907>`_, `pr#15048 <https://github.com/ceph/ceph/pull/15048>`_, Mykola Golub)
* rbd-nbd: relax size check for newer kernel versions (`issue#19871 <http://tracker.ceph.com/issues/19871>`_, `pr#14976 <https://github.com/ceph/ceph/pull/14976>`_, Mykola Golub)
* rbd: rbd/bench: add notes on default values, for ease of use (`pr#14762 <https://github.com/ceph/ceph/pull/14762>`_, Zheng Yin)
* rbd: rbd, librbd: migrate atomic_t to std::atomic (`pr#14656 <https://github.com/ceph/ceph/pull/14656>`_, Jesse Williamson)
* rbd: rbd-mirror A/A: proxy InstanceReplayer APIs via InstanceWatcher RPC (`issue#18787 <http://tracker.ceph.com/issues/18787>`_, `pr#13978 <https://github.com/ceph/ceph/pull/13978>`_, Mykola Golub)
* rbd: recognize exclusive option (`pr#14785 <https://github.com/ceph/ceph/pull/14785>`_, Ilya Dryomov)
* rbd: removed spurious error message from mirror pool commands (`pr#14935 <https://github.com/ceph/ceph/pull/14935>`_, Jason Dillaman)
* rbd: stop indefinite thread waiting in krbd udev handling (`issue#17195 <http://tracker.ceph.com/issues/17195>`_, `pr#14051 <https://github.com/ceph/ceph/pull/14051>`_, Spandan Kumar Sahu)
* rbd,tests: qa: krbd write-after-checksum tests (`pr#14836 <https://github.com/ceph/ceph/pull/14836>`_, Ilya Dryomov)
* rbd,tests: qa/workunits/rbd: increased trash deferment period (`pr#14846 <https://github.com/ceph/ceph/pull/14846>`_, Jason Dillaman)
* rbd,tests: qa/workunits: switch to OpenStack Ocata release for RBD testing (`pr#14465 <https://github.com/ceph/ceph/pull/14465>`_, Jason Dillaman)
* rbd,tests: test/librbd/test_librbd.cc: set *features even if RBD_FEATURES is unset (`issue#19865 <http://tracker.ceph.com/issues/19865>`_, `pr#14965 <https://github.com/ceph/ceph/pull/14965>`_, Dan Mick)
* rbd,tests: test/librbd/test_notify.py: don't disable feature in slave (`issue#19716 <http://tracker.ceph.com/issues/19716>`_, `pr#14751 <https://github.com/ceph/ceph/pull/14751>`_, Mykola Golub)
* rbd,tests: test/rbd_mirror: race in TestMockLeaderWatcher.AcquireError (`issue#19405 <http://tracker.ceph.com/issues/19405>`_, `pr#14741 <https://github.com/ceph/ceph/pull/14741>`_, Mykola Golub)
* rbd,tests: test: remove hard-coded image name from RBD metadata test (`issue#19798 <http://tracker.ceph.com/issues/19798>`_, `pr#14848 <https://github.com/ceph/ceph/pull/14848>`_, Jason Dillaman)
* rdma: msg/async/rdma: add inqueue rx chunks perf counter (`pr#14782 <https://github.com/ceph/ceph/pull/14782>`_, Haomai Wang)
* rgw: add bucket size limit check to radosgw-admin (`issue#17925 <http://tracker.ceph.com/issues/17925>`_, `pr#11796 <https://github.com/ceph/ceph/pull/11796>`_, Matt Benjamin)
* rgw: add 'state==SyncState::IncrementalSync' condition when add item … (`pr#14552 <https://github.com/ceph/ceph/pull/14552>`_, Shasha Lu)
* rgw: add support for container and object levels of swift bulkupload (`pr#14775 <https://github.com/ceph/ceph/pull/14775>`_, Jing Wenjun)
* rgw: add support for FormPost of Swift API (`issue#17273 <http://tracker.ceph.com/issues/17273>`_, `pr#11179 <https://github.com/ceph/ceph/pull/11179>`_, Radoslaw Zarzynski, Orit Wasserman)
* rgw: civetweb: don't go past the array index while calling mg_start (`issue#19749 <http://tracker.ceph.com/issues/19749>`_, `pr#14750 <https://github.com/ceph/ceph/pull/14750>`_, Abhishek Lekshmanan, Jesse Williamson)
* rgw: clean unused code in cls_statelog_check_state (`pr#10260 <https://github.com/ceph/ceph/pull/10260>`_, weiqiaomiao)
* rgw: cleanup: fix variable name in RGWRados::create_pool() declaration (`pr#14547 <https://github.com/ceph/ceph/pull/14547>`_, Nathan Cutler)
* rgw: cleanup lc continuation (`pr#14906 <https://github.com/ceph/ceph/pull/14906>`_, Jiaying Ren)
* rgw: cls/rgw: list_plain_entries() stops before bi_log entries (`issue#19876 <http://tracker.ceph.com/issues/19876>`_, `pr#14981 <https://github.com/ceph/ceph/pull/14981>`_, Casey Bodley)
* rgw: custom user data header (`issue#19644 <http://tracker.ceph.com/issues/19644>`_, `pr#14592 <https://github.com/ceph/ceph/pull/14592>`_, Pavan Rallabhandi)
* rgw: deduplicate variants of rgw_make_bucket_entry_name(). (`pr#14299 <https://github.com/ceph/ceph/pull/14299>`_, Radoslaw Zarzynski)
* rgw: don't do unnecessary write if buffer has zero length (`pr#14925 <https://github.com/ceph/ceph/pull/14925>`_, fang yuxiang)
* rgw: don't spawn error_repo until lease is acquired (`issue#19446 <http://tracker.ceph.com/issues/19446>`_, `pr#14714 <https://github.com/ceph/ceph/pull/14714>`_, Casey Bodley)
* rgw: drop unused param "bucket" from select_bucket_placement (`pr#14390 <https://github.com/ceph/ceph/pull/14390>`_, Shasha Lu)
* rgw: drop unused port var (`pr#14412 <https://github.com/ceph/ceph/pull/14412>`_, Jiaying Ren)
* rgw: fix broken /crossdomain.xml, /info and /healthcheck of Swift API. (`issue#19520 <http://tracker.ceph.com/issues/19520>`_, `pr#14373 <https://github.com/ceph/ceph/pull/14373>`_, Radoslaw Zarzynski)
* rgw: fix forward request for bulkupload to be applied in multisite (`issue#19645 <http://tracker.ceph.com/issues/19645>`_, `pr#14601 <https://github.com/ceph/ceph/pull/14601>`_, Jing Wenjun)
* rgw: fix handling of --remote in radosgw-admin period commands (`issue#19554 <http://tracker.ceph.com/issues/19554>`_, `pr#14407 <https://github.com/ceph/ceph/pull/14407>`_, Casey Bodley)
* rgw: fix RadosGW hang during multi-chunk upload of AWSv4. (`issue#19754 <http://tracker.ceph.com/issues/19754>`_, `pr#14770 <https://github.com/ceph/ceph/pull/14770>`_, Radoslaw Zarzynski)
* rgw: migrate atomic_t to std::atomic<> (ebirah) (`pr#14839 <https://github.com/ceph/ceph/pull/14839>`_, Jesse Williamson)
* rgw: radosgw-admin: warn that 'realm rename' does not update other clusters (`issue#19746 <http://tracker.ceph.com/issues/19746>`_, `pr#14722 <https://github.com/ceph/ceph/pull/14722>`_, Casey Bodley)
* rgw: rgw_file: cleanup virtual keyword on derived functions (`pr#14908 <https://github.com/ceph/ceph/pull/14908>`_, Gui Hecheng)
* rgw: rgw_ldap: log the ldap err in case of bind failure (`pr#14781 <https://github.com/ceph/ceph/pull/14781>`_, Abhishek Lekshmanan)
* rgw: rgw multisite: automated mdlog trimming (`pr#13111 <https://github.com/ceph/ceph/pull/13111>`_, Casey Bodley)
* rgw: rgw multisite: fixes for meta sync across periods (`issue#18639 <http://tracker.ceph.com/issues/18639>`_, `pr#13070 <https://github.com/ceph/ceph/pull/13070>`_, Casey Bodley)
* rgw: rgw multisite: remove the redundant post in OPT_ZONEGROUP_MODIFY (`pr#14359 <https://github.com/ceph/ceph/pull/14359>`_, Jing Wenjun)
* rgw: RGWPeriodPusher spawns http thread before cr thread (`issue#19834 <http://tracker.ceph.com/issues/19834>`_, `pr#14936 <https://github.com/ceph/ceph/pull/14936>`_, Casey Bodley)
* rgw: rgw_rados drop deprecated global var (`pr#14411 <https://github.com/ceph/ceph/pull/14411>`_, Jiaying Ren)
* rgw: should delete in_stream_req if conn->get_obj(...) returns a non-zero value (`pr#9950 <https://github.com/ceph/ceph/pull/9950>`_, weiqiaomiao)
* rgw: swift: ability to update swift read and write acls separately. (`issue#19289 <http://tracker.ceph.com/issues/19289>`_, `pr#14499 <https://github.com/ceph/ceph/pull/14499>`_, Marcus Watts)
* rgw: swift: disable revocation thread if sleep == 0 (`issue#19499 <http://tracker.ceph.com/issues/19499>`_, `issue#9493 <http://tracker.ceph.com/issues/9493>`_, `pr#14501 <https://github.com/ceph/ceph/pull/14501>`_, Marcus Watts)
* rgw,tests: qa/rgw: add cluster name to path when s3tests scans rgw log (`pr#14845 <https://github.com/ceph/ceph/pull/14845>`_, Casey Bodley)
* rgw,tests: qa/rgw: don't scan radosgw logs for encryption keys on jewel upgrade test (`pr#14697 <https://github.com/ceph/ceph/pull/14697>`_, Casey Bodley)
* rgw,tests: qa/rgw: fix assertions in radosgw_admin task (`pr#14842 <https://github.com/ceph/ceph/pull/14842>`_, Casey Bodley)
* rgw,tests: test/rgw: fixes for test_multi_period_incremental_sync() (`pr#13067 <https://github.com/ceph/ceph/pull/13067>`_, Casey Bodley)
* rgw,tests: test/rgw: fix for empty lists as default arguments (`pr#14816 <https://github.com/ceph/ceph/pull/14816>`_, Casey Bodley)
* rgw: update Beast for streaming reads in asio frontend (`pr#14273 <https://github.com/ceph/ceph/pull/14273>`_, Casey Bodley)
* rgw: using the same bucket num_shards as master zg when creating bucket in secondary zg (`issue#19745 <http://tracker.ceph.com/issues/19745>`_, `pr#14388 <https://github.com/ceph/ceph/pull/14388>`_, Shasha Lu)
* rgw: when create_bucket use the same num_shards with info.num_shards (`issue#19745 <http://tracker.ceph.com/issues/19745>`_, `pr#15010 <https://github.com/ceph/ceph/pull/15010>`_, Shasha Lu)
* tests: ceph_test_rados_api_tier: tolerate ENOENT from 'pg scrub' (`pr#14807 <https://github.com/ceph/ceph/pull/14807>`_, Sage Weil)
* tests: cephtool/test.sh error on full tests (`issue#19698 <http://tracker.ceph.com/issues/19698>`_, `pr#14647 <https://github.com/ceph/ceph/pull/14647>`_, Willem Jan Withagen, David Zafman)
* tests: Don't dump core when using EXPECT_DEATH (`pr#14821 <https://github.com/ceph/ceph/pull/14821>`_, Kefu Chai, Brad Hubbard)
* tests: fio_ceph_objectstore: fixes improper write request data lifetime (`pr#14338 <https://github.com/ceph/ceph/pull/14338>`_, Adam Kupczyk)
* tests: fix broken links in upgrade/hammer-jewel-x/stress-split (`issue#19793 <http://tracker.ceph.com/issues/19793>`_, `pr#14831 <https://github.com/ceph/ceph/pull/14831>`_, Nathan Cutler)
* tests: include/denc: support ENCODE_DUMP (`pr#14962 <https://github.com/ceph/ceph/pull/14962>`_, Sage Weil)
* tests: libradosstriper: do not assign garbage to returned value (`pr#15009 <https://github.com/ceph/ceph/pull/15009>`_, Kefu Chai)
* tests: qa/erasure-code: override min_size to 2 (`issue#19770 <http://tracker.ceph.com/issues/19770>`_, `pr#14872 <https://github.com/ceph/ceph/pull/14872>`_, Kefu Chai)
* tests: qa/suites/jewel-x/point-to-point: don't scan for keys on second s3tests either (`pr#14788 <https://github.com/ceph/ceph/pull/14788>`_, Sage Weil)
* tests: qa/suites: Reduce fs combination tests for smoke, use bluestore (`pr#14854 <https://github.com/ceph/ceph/pull/14854>`_, Vasu Kulkarni)
* tests: qa/suites: Revert "qa/suites: add mon-reweight-min-pgs-per-osd = 4" (`pr#14584 <https://github.com/ceph/ceph/pull/14584>`_, Kefu Chai)
* tests: qa/suites/upgrade/jewel-x: add mgr.x role (`pr#14689 <https://github.com/ceph/ceph/pull/14689>`_, Sage Weil)
* tests: qa/suites/upgrade/kraken-x: misc fixes (`pr#14887 <https://github.com/ceph/ceph/pull/14887>`_, Sage Weil)
* tests: qa/tasks/ceph_manager: always fix pgp_num when done with thrashosd task (`issue#19771 <http://tracker.ceph.com/issues/19771>`_, `pr#14931 <https://github.com/ceph/ceph/pull/14931>`_, Kefu Chai)
* tests: qa/tasks: few fixes to get ceph-deploy 1node to working state (`pr#14400 <https://github.com/ceph/ceph/pull/14400>`_, Vasu Kulkarni)
* tests: rados: move cephtool.yaml to new singleton/bluestore subsuite (`issue#19797 <http://tracker.ceph.com/issues/19797>`_, `pr#14847 <https://github.com/ceph/ceph/pull/14847>`_, Nathan Cutler)
* tests: set -x in suites/iozone.sh workunit (`issue#19740 <http://tracker.ceph.com/issues/19740>`_, `pr#14713 <https://github.com/ceph/ceph/pull/14713>`_, Nathan Cutler)
* tests: test/compressor: disable isal tests if not available (`pr#14929 <https://github.com/ceph/ceph/pull/14929>`_, Kefu Chai)
* tests: test: c_read_operations.cc: silence warning from -Wsign-compare (`pr#14888 <https://github.com/ceph/ceph/pull/14888>`_, Jos Collin)
* tests: test/fio_ceph_objectstore: fix fio plugin build failure by engine_data (`pr#15044 <https://github.com/ceph/ceph/pull/15044>`_, lisali)
* tests: test: fixing assert that creates warning: comparison between signed and unsigned integer expressions (`pr#14794 <https://github.com/ceph/ceph/pull/14794>`_, Jos Collin)
* tests: test/fsx: Remove the dead code associated with aio backend (`pr#14905 <https://github.com/ceph/ceph/pull/14905>`_, Zhou Zhengping)
* tests: test/objectstore/: Check put_ref return value (`pr#15007 <https://github.com/ceph/ceph/pull/15007>`_, zhanglei)
* tests: test/osd/osd-dup.sh: lower wb fd throttle limits (`pr#14984 <https://github.com/ceph/ceph/pull/14984>`_, Dan Mick)
* tests: test: use 7130 for crush-classes.sh (`pr#14783 <https://github.com/ceph/ceph/pull/14783>`_, Loic Dachary)
* tests: test: warning: comparison between signed and unsigned integer expressions (`pr#14705 <https://github.com/ceph/ceph/pull/14705>`_, Jos Collin)
* tests,tools: test: kill warnings (`pr#14892 <https://github.com/ceph/ceph/pull/14892>`_, Kefu Chai)
* tools: change compare_exchange_weak to compare_exchange_strong (`pr#15030 <https://github.com/ceph/ceph/pull/15030>`_, Jesse Williamson)
* tools: rados: check for negative return value of rados_create_with_context() as its comment says (`pr#10893 <https://github.com/ceph/ceph/pull/10893>`_, zhang.zezhu)
* tools: rados: out json 'df' values as numbers, not strings (`issue#15546 <http://tracker.ceph.com/issues/15546>`_, `pr#14644 <https://github.com/ceph/ceph/pull/14644>`_, Sage Weil)
|
||||
|
||||
|
||||
v12.0.2 Luminous (dev)
======================

This is the third development checkpoint release of Luminous, the next long term
@ -10,40 +827,6 @@ stable release.

Major changes from v12.0.1
--------------------------
* The original librados rados_objects_list_open (C) and objects_begin
  (C++) object listing API, deprecated in Hammer, has finally been
  removed. Users of this interface must update their software to use
  either the rados_nobjects_list_open (C) and nobjects_begin (C++) API or
  the new rados_object_list_begin (C) and object_list_begin (C++) API
  before updating the client-side librados library to Luminous. (A
  minimal C++ sketch of the replacement API follows this list.)

  Object enumeration (via any API) with the latest librados version
  and pre-Hammer OSDs is no longer supported. Note that no in-tree
  Ceph services rely on object enumeration via the deprecated APIs, so
  only external librados users might be affected.

  The newest (and recommended) rados_object_list_begin (C) and
  object_list_begin (C++) API is only usable on clusters with the
  SORTBITWISE flag enabled (Jewel and later). (Note that this flag is
  required to be set before upgrading beyond Jewel.)
* CephFS clients without the 'p' flag in their authentication capability
  string will no longer be able to set quotas or any layout fields. This
  flag previously only restricted modification of the pool and namespace
  fields in layouts.
* CephFS directory fragmentation (large directory support) is enabled
  by default on new filesystems. To enable it on existing filesystems
  use "ceph fs set <fs_name> allow_dirfrags".
* CephFS will generate a health warning if you have fewer standby daemons
  than it thinks you wanted. By default this will be 1 if you ever had
  a standby, and 0 if you did not. You can customize this using
  ``ceph fs set <fs> standby_count_wanted <number>``. Setting it
  to zero will effectively disable the health check.
* The "ceph mds tell ..." command has been removed. It is superseded
  by "ceph tell mds.<id> ..."
* RGW introduces server-side encryption of uploaded objects, with three
  options for the management of encryption keys: automatic encryption
  (only recommended for test setups), customer-provided keys similar to
  the Amazon SSE-KMS specification, and using a key management service
  (OpenStack Barbican).
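As an illustration (not part of the upstream notes), here is a minimal C++
sketch of object enumeration with the replacement nobjects_begin API; the
pool name ``mypool`` and the ``admin`` client id are placeholder
assumptions::

    #include <rados/librados.hpp>
    #include <iostream>

    int main() {
      librados::Rados cluster;
      cluster.init("admin");                          // placeholder client id
      cluster.conf_read_file("/etc/ceph/ceph.conf");
      if (cluster.connect() < 0)
        return 1;
      librados::IoCtx io_ctx;
      cluster.ioctx_create("mypool", io_ctx);         // placeholder pool name
      // nobjects_begin/nobjects_end replace the removed objects_begin API
      for (librados::NObjectIterator it = io_ctx.nobjects_begin();
           it != io_ctx.nobjects_end(); ++it)
        std::cout << it->get_oid() << std::endl;
      io_ctx.close();
      cluster.shutdown();
    }
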
Notable Changes
---------------
@ -924,51 +1707,6 @@ This is the first development checkpoint release of Luminous series, the next
long term release. We're off to a good start to release Luminous in the spring
of '17.

Major changes from Kraken
-------------------------
* When assigning a network to the public network and not to
  the cluster network, the network specification of the public
  network will be used for the cluster network as well.
  In older versions this would lead to cluster services
  being bound to 0.0.0.0:<port>, thus making the
  cluster services even more publicly available than the
  public services. When only specifying a cluster network it
  will still result in the public services binding to 0.0.0.0.
  (An illustrative ceph.conf fragment follows this list.)

* Some variants of the omap_get_keys and omap_get_vals librados
  functions have been deprecated in favor of omap_get_vals2 and
  omap_get_keys2. The new methods include an output argument
  indicating whether there are additional keys left to fetch.
  Previously this had to be inferred from the requested key count vs
  the number of keys returned, but this breaks with new OSD-side
  limits on the number of keys or bytes that can be returned by a
  single omap request. These limits were introduced by kraken but
  are effectively disabled by default (by setting a very large limit
  of 1 GB) because users of the newly deprecated interface cannot
  tell whether they should fetch more keys or not. In the case of
  the standalone calls in the C++ interface
  (IoCtx::get_omap_{keys,vals}), librados has been updated to loop on
  the client side to provide a correct result via multiple calls to
  the OSD. In the case of the methods used for building
  multi-operation transactions, however, client-side looping is not
  practical, and the methods have been deprecated. Note that use of
  either the IoCtx methods on older librados versions or the
  deprecated methods on any version of librados will lead to
  incomplete results if/when the new OSD limits are enabled. (A
  client-side paging sketch in C++ follows this list.)

* In previous versions, if a client sent an op to the wrong OSD, the OSD
  would reply with ENXIO. The rationale here is that the client or OSD is
  clearly buggy and we want to surface the error as clearly as possible.
  We now only send the ENXIO reply if the osd_enxio_on_misdirected_op option
  is enabled (it's off by default). This means that a VM using librbd that
  previously would have gotten an EIO and gone read-only will now see a
  blocked/hung IO instead. (A configuration fragment for re-enabling the
  reply follows this list.)

* When configuring ceph-fuse mounts in /etc/fstab, a new syntax is
  available that uses "ceph.<arg>=<val>" in the options column, instead
  of putting configuration in the device column. The old style syntax
  still works. See the documentation page "Mount CephFS in your
  file systems table" for details. (An example entry follows this list.)

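For illustration only (the addresses are placeholders, not part of the
release), a ``ceph.conf`` fragment that exercises the new public/cluster
network behavior described above::

    [global]
        public network = 10.0.0.0/24
        # cluster network left unset: cluster daemons now also bind within
        # 10.0.0.0/24 instead of binding to 0.0.0.0
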
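A hedged sketch of the client-side paging that the new C++ call makes
possible; it assumes an already-open ``librados::IoCtx io_ctx``, and the
object name ``myobj`` and page size of 1024 are illustrative::

    // page through all omap values of "myobj" with the new interface
    std::map<std::string, librados::bufferlist> vals;
    std::string after;                  // empty: start from the first key
    bool more = true;
    while (more) {
      std::map<std::string, librados::bufferlist> page;
      int r = io_ctx.omap_get_vals2("myobj", after, 1024, &page, &more);
      if (r < 0)
        break;                          // real code should handle the error
      if (!page.empty())
        after = page.rbegin()->first;   // resume after the last key seen
      vals.insert(page.begin(), page.end());
    }
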
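If the old reply is wanted, the option named above can be turned on
explicitly (shown here as a ``ceph.conf`` fragment for illustration)::

    [osd]
        osd enxio on misdirected op = true
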
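For example, a hypothetical ``/etc/fstab`` entry in the new style (the
mount point and client id are placeholders)::

    none  /mnt/cephfs  fuse.ceph  ceph.id=admin,_netdev,defaults  0 0
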
Notable Changes
@ -1475,7 +2213,7 @@ Upgrading from Jewel
to Kraken 11.2.z (or, eventually, Luminous 12.2.z).

* The ``sortbitwise`` flag must be set on the Jewel cluster before upgrading
  to Kraken. The latest Jewel (10.2.4+) releases issue a health warning if
  to Kraken. The latest Jewel (10.2.8+) releases issue a health warning if
  the flag is not set, so this is probably already set. If it is not, Kraken
  OSDs will refuse to start and will print an error message in their log.
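On the Jewel cluster, the flag can be set with the standard command::

    ceph osd set sortbitwise
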

@ -346,7 +346,8 @@ Then, download them from a mirror and install them. For example::

    sudo yum install ditaa-0.9-13.r74.fc21.noarch.rpm

Once you have installed all these packages, build the documentation by following
the steps given in ``Build the Source``.
the steps given in `Build the Source`_.


Commit the Change
-----------------

@ -12,11 +12,11 @@ These are exciting times in the Ceph community! Get involved!
| **Blog**             | Check the Ceph Blog_ periodically to keep track | http://ceph.com/community/blog/               |
|                      | of Ceph progress and important announcements.   |                                               |
+----------------------+-------------------------------------------------+-----------------------------------------------+
| **Planet Ceph**      | Check the blog aggregation on Planet Ceph for   | http://ceph.com/community/planet-ceph/        |
| **Planet Ceph**      | Check the blog aggregation on Planet Ceph for   | https://ceph.com/category/planet/             |
|                      | interesting stories, information and            |                                               |
|                      | experiences from the community.                 |                                               |
+----------------------+-------------------------------------------------+-----------------------------------------------+
| **Wiki**             | Check the Ceph Wiki is a source for more        | https://wiki.ceph.com/                        |
| **Wiki**             | Check the Ceph Wiki is a source for more        | http://wiki.ceph.com/                         |
|                      | community and development related topics. You   |                                               |
|                      | can find there information about blueprints,    |                                               |
|                      | meetups, the Ceph Developer Summits and more.   |                                               |
@ -38,14 +38,11 @@ Create a Cluster
================

If at any point you run into trouble and you want to start over, execute
the following to purge the configuration::

    ceph-deploy purgedata {ceph-node} [{ceph-node}]
    ceph-deploy forgetkeys

To purge the Ceph packages too, you may also execute::
the following to purge the Ceph packages, and erase all its data and configuration::

    ceph-deploy purge {ceph-node} [{ceph-node}]
    ceph-deploy purgedata {ceph-node} [{ceph-node}]
    ceph-deploy forgetkeys

If you execute ``purge``, you must re-install Ceph.

@ -1,7 +1,7 @@

CXX?=g++
CXX_FLAGS?=-std=c++11 -Wall -Wextra -Werror -g
CXX_LIBS?=-lboost_system -lrados
CXX_LIBS?=-lboost_system -lrados -lradosstriper
CXX_INC?=$(LOCAL_LIBRADOS_INC)
CXX_CC=$(CXX) $(CXX_FLAGS) $(CXX_INC) $(LOCAL_LIBRADOS) $(CXX_LIBS)

@ -13,11 +13,12 @@ CC_CC=$(CC) $(CC_FLAGS) $(CC_INC) $(LOCAL_LIBRADOS) $(CC_LIBS)

# Relative path to the Ceph source:
CEPH_SRC_HOME?=../../src
CEPH_BLD_HOME?=../../build

LOCAL_LIBRADOS?=-L$(CEPH_SRC_HOME)/.libs/ -Wl,-rpath,$(CEPH_SRC_HOME)/.libs
LOCAL_LIBRADOS?=-L$(CEPH_BLD_HOME)/lib/ -Wl,-rpath,$(CEPH_BLD_HOME)/lib
LOCAL_LIBRADOS_INC?=-I$(CEPH_SRC_HOME)/include

all: hello_world_cpp hello_world_c
all: hello_world_cpp hello_radosstriper_cpp hello_world_c

# Build against the system librados instead of the one in the build tree:
all-system: LOCAL_LIBRADOS=
@ -27,9 +28,12 @@ all-system: all

hello_world_cpp: hello_world.cc
	$(CXX_CC) -o hello_world_cpp hello_world.cc

hello_radosstriper_cpp: hello_radosstriper.cc
	$(CXX_CC) -o hello_radosstriper_cpp hello_radosstriper.cc

hello_world_c: hello_world_c.c
	$(CC_CC) -o hello_world_c hello_world_c.c

clean:
	rm -f hello_world_cpp hello_world_c
	rm -f hello_world_cpp hello_radosstriper_cpp hello_world_c

ceph/examples/librados/hello_radosstriper.cc (new file, 102 lines)
@ -0,0 +1,102 @@
#include "rados/librados.hpp"
#include "radosstriper/libradosstriper.hpp"
#include <cstdlib>
#include <iostream>
#include <string>


int main(int argc, char* argv[])
{
  if(argc != 6)
  {
    std::cout << "Please put in correct params\n" <<
      "Stripe Count:\n" <<
      "Object Size:\n" <<
      "File Name:\n" <<
      "Object Name:\n"
      "Pool Name:" << std::endl;
    return EXIT_FAILURE;
  }
  uint32_t strip_count = std::stoi(argv[1]);
  uint32_t obj_size = std::stoi(argv[2]);
  std::string fname = argv[3];
  std::string obj_name = argv[4];
  std::string pool_name = argv[5];
  int ret = 0;
  librados::IoCtx io_ctx;
  librados::Rados cluster;
  libradosstriper::RadosStriper* rs = new libradosstriper::RadosStriper;

  // make sure the keyring file is in /etc/ceph/ and is world readable
  ret = cluster.init2("client.admin", "ceph", 0);
  if(ret < 0)
  {
    std::cerr << "Couldn't init cluster " << ret << std::endl;
  }

  // make sure ceph.conf is in /etc/ceph/ and is world readable
  ret = cluster.conf_read_file("ceph.conf");
  if(ret < 0)
  {
    std::cerr << "Couldn't read conf file " << ret << std::endl;
  }
  ret = cluster.connect();
  if(ret < 0)
  {
    std::cerr << "Couldn't connect to cluster " << ret << std::endl;
  }
  else
  {
    std::cout << "Connected to Cluster" << std::endl;
  }

  ret = cluster.ioctx_create(pool_name.c_str(), io_ctx);

  if(ret < 0)
  {
    std::cerr << "Couldn't Create IO_CTX " << ret << std::endl;
  }
  ret = libradosstriper::RadosStriper::striper_create(io_ctx, rs);
  if(ret < 0)
  {
    std::cerr << "Couldn't Create RadosStriper " << ret << std::endl;
    // bail out here: rs must not be used after it has been deleted
    delete rs;
    io_ctx.close();
    cluster.shutdown();
    return EXIT_FAILURE;
  }
  uint64_t alignment = 0;
  ret = io_ctx.pool_required_alignment2(&alignment);
  if(ret < 0)
  {
    std::cerr << "IO_CTX didn't give alignment " << ret
      << "\n Is this an erasure coded pool? " << std::endl;

    delete rs;
    io_ctx.close();
    cluster.shutdown();
    return EXIT_FAILURE;
  }
  std::cout << "Pool alignment: " << alignment << std::endl;
  rs->set_object_layout_stripe_unit(alignment);
  // how many objects are we striping across?
  rs->set_object_layout_stripe_count(strip_count);
  // how big should each object be?
  rs->set_object_layout_object_size(obj_size);

  std::string err = "no_err";
  librados::bufferlist bl;
  bl.read_file(fname.c_str(), &err);
  if(err != "no_err")
  {
    std::cout << "Error reading file into bufferlist: " << err << std::endl;
    delete rs;
    io_ctx.close();
    cluster.shutdown();
    return EXIT_FAILURE;
  }

  std::cout << "Writing: " << fname << "\nas: " << obj_name << std::endl;
  rs->write_full(obj_name, bl);
  std::cout << "done with: " << fname << std::endl;

  delete rs;
  io_ctx.close();
  cluster.shutdown();
}
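A possible invocation of the example above once it is built via the new
``hello_radosstriper_cpp`` target (the stripe count, object size, and all
names are placeholders; the pool must already exist)::

    ./hello_radosstriper_cpp 8 4194304 input.bin striped_obj mypool
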
@ -22,6 +22,7 @@ export LC_ALL=C # the following is vulnerable to i18n

if [ x`uname`x = xFreeBSDx ]; then
    $SUDO pkg install -yq \
	devel/git \
	devel/gperf \
	devel/gmake \
	devel/cmake \
	devel/yasm \
@ -29,7 +30,6 @@ if [ x`uname`x = xFreeBSDx ]; then
	devel/boost-python-libs \
	devel/valgrind \
	devel/pkgconf \
	devel/libatomic_ops \
	devel/libedit \
	devel/libtool \
	devel/google-perftools \
@ -52,6 +52,7 @@ if [ x`uname`x = xFreeBSDx ]; then
	emulators/fuse \
	java/junit \
	lang/python27 \
	devel/py-pip \
	devel/py-argparse \
	devel/py-nose \
	www/py-flask \
@ -59,6 +60,9 @@ if [ x`uname`x = xFreeBSDx ]; then
	sysutils/flock \
	sysutils/fusefs-libs \

    # Now use pip to install some extra python modules
    pip install pecan

    exit
else
    source /etc/os-release
@ -159,7 +163,8 @@ function populate_wheelhouse() {

    # although pip comes with virtualenv, having a recent version
    # of pip matters when it comes to using wheel packages
    pip --timeout 300 $install 'setuptools >= 0.8' 'pip >= 7.0' 'wheel >= 0.24' || return 1
    # workaround of https://github.com/pypa/setuptools/issues/1042
    pip --timeout 300 $install 'setuptools >= 0.8,< 36' 'pip >= 7.0' 'wheel >= 0.24' || return 1
    if test $# != 0 ; then
        pip --timeout 300 $install $@ || return 1
    fi

@ -8,3 +8,4 @@ us-east.ceph.com: Tyler Bishop <tyler.bishop@beyondhosting.net>
hk.ceph.com: Mart van Santen <mart@greenhost.nl>
fr.ceph.com: Adrien Gillard <gillard.adrien@gmail.com>
uk.ceph.com: Tim Bishop <T.D.Bishop@kent.ac.uk>
cn.ceph.com: USTC LUG <lug@ustc.edu.cn>

@ -7,3 +7,8 @@ openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
@ -6,5 +6,9 @@ overrides:
      osd crush chooseleaf type: 0
      osd pool default pg num: 128
      osd pool default pgp num: 128
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
@ -5,3 +5,8 @@ openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
@ -9,3 +9,8 @@ openstack:
  log-rotate:
    ceph-mds: 10G
    ceph-osd: 10G
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
@ -6,3 +6,8 @@ openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true