From 6d12a8184f285ed66ebbebf3e4557ef7060d4303 Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Wed, 25 Oct 2023 21:06:05 +0000
Subject: [PATCH 1/5] common/subsys: add log subsys for rgw sfs driver

Signed-off-by: Joao Eduardo Luis
---
 doc/rados/troubleshooting/log-and-debug.rst | 2 ++
 src/common/subsys.h                         | 1 +
 2 files changed, 3 insertions(+)

diff --git a/doc/rados/troubleshooting/log-and-debug.rst b/doc/rados/troubleshooting/log-and-debug.rst
index 929c3f53f8841..3314ac3611c0c 100644
--- a/doc/rados/troubleshooting/log-and-debug.rst
+++ b/doc/rados/troubleshooting/log-and-debug.rst
@@ -298,6 +298,8 @@ to their default level or to a level suitable for normal operations.
 +--------------------------+-----------+--------------+
 | ``rgw dbstore``          | 1         | 5            |
 +--------------------------+-----------+--------------+
+| ``rgw sfs``              | 1         | 5            |
++--------------------------+-----------+--------------+
 | ``javaclient``           | 1         | 5            |
 +--------------------------+-----------+--------------+
 | ``asok``                 | 1         | 5            |
 +--------------------------+-----------+--------------+
diff --git a/src/common/subsys.h b/src/common/subsys.h
index 3e558b4409251..1dc91e888b763 100644
--- a/src/common/subsys.h
+++ b/src/common/subsys.h
@@ -64,6 +64,7 @@ SUBSYS(rgw_datacache, 1, 5)
 SUBSYS(rgw_access, 1, 5)
 SUBSYS(rgw_dbstore, 1, 5)
 SUBSYS(rgw_flight, 1, 5)
+SUBSYS(rgw_sfs, 1, 5)
 SUBSYS(javaclient, 1, 5)
 SUBSYS(asok, 1, 5)
 SUBSYS(throttle, 1, 1)
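With the subsystem registered, SFS sources can opt into it and operators can
tune its verbosity independently of the general `rgw` subsystem. The sketch
below is illustrative only: the `example()` function is hypothetical, while
`ceph_subsys_rgw_sfs`, `ldout` and `dendl` come from the tree, and the 1/5
defaults match the table above. Per the usual Ceph convention for SUBSYS
entries, verbosity is raised with e.g. `debug rgw sfs = 20/5` in ceph.conf.

    // Hypothetical call site, shown only to illustrate the mechanism.
    #include "common/dout.h"

    // Route this translation unit's dout output to the rgw_sfs subsystem;
    // patch 2 of this series applies exactly this define across the driver.
    #define dout_subsys ceph_subsys_rgw_sfs

    void example(CephContext* cct) {
      // Emitted only when the rgw_sfs gather level is >= 10; at the default
      // 1/5 this message is kept in the memory log but not written out.
      ldout(cct, 10) << "sfs: noisy diagnostic" << dendl;
    }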
From 0ebdf7d82919deeaf1581ca2483309f72bc19951 Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Wed, 25 Oct 2023 21:30:01 +0000
Subject: [PATCH 2/5] rgw/sfs: use rgw_sfs log subsys

Signed-off-by: Joao Eduardo Luis
---
 src/rgw/driver/sfs/bucket.cc        | 2 +-
 src/rgw/driver/sfs/multipart.cc     | 2 +-
 src/rgw/driver/sfs/object.cc        | 2 +-
 src/rgw/driver/sfs/sfs_bucket.cc    | 2 +-
 src/rgw/driver/sfs/sfs_gc.h         | 2 +-
 src/rgw/driver/sfs/sfs_lc.cc        | 2 +-
 src/rgw/driver/sfs/sfs_user.cc      | 2 +-
 src/rgw/driver/sfs/sqlite/dbconn.cc | 2 +-
 src/rgw/driver/sfs/types.cc         | 3 ++-
 src/rgw/driver/sfs/user.cc          | 2 +-
 src/rgw/driver/sfs/writer.cc        | 2 +-
 src/rgw/driver/sfs/zone.cc          | 2 +-
 src/rgw/rgw_sal_sfs.cc              | 2 +-
 13 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/src/rgw/driver/sfs/bucket.cc b/src/rgw/driver/sfs/bucket.cc
index 93e906ad3db2c..3acdda5977d97 100644
--- a/src/rgw/driver/sfs/bucket.cc
+++ b/src/rgw/driver/sfs/bucket.cc
@@ -37,7 +37,7 @@
 #include "rgw_common.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
 using namespace sqlite_orm;
diff --git a/src/rgw/driver/sfs/multipart.cc b/src/rgw/driver/sfs/multipart.cc
index 0a011cf1d7558..2e9d2ecbd7cac 100644
--- a/src/rgw/driver/sfs/multipart.cc
+++ b/src/rgw/driver/sfs/multipart.cc
@@ -27,7 +27,7 @@
 #include "rgw_sal_sfs.h"
 #include "writer.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/object.cc b/src/rgw/driver/sfs/object.cc
index c2e686295df3b..48d914580d3f7 100644
--- a/src/rgw/driver/sfs/object.cc
+++ b/src/rgw/driver/sfs/object.cc
@@ -22,7 +22,7 @@
 #include "rgw_common.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/sfs_bucket.cc b/src/rgw/driver/sfs/sfs_bucket.cc
index 3ac4ce5b42fc0..7df2aaa6e82f7 100644
--- a/src/rgw/driver/sfs/sfs_bucket.cc
+++ b/src/rgw/driver/sfs/sfs_bucket.cc
@@ -13,7 +13,7 @@
  */
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/sfs_gc.h b/src/rgw/driver/sfs/sfs_gc.h
index 638ab5a52a0cb..09b5df3ecd4e0 100644
--- a/src/rgw/driver/sfs/sfs_gc.h
+++ b/src/rgw/driver/sfs/sfs_gc.h
@@ -18,7 +18,7 @@
 #include "rgw_sal.h"
 #include "rgw_sal_sfs.h"
 
-#define sfs_dout_subsys ceph_subsys_rgw
+#define sfs_dout_subsys ceph_subsys_rgw_sfs
 
 namespace rgw::sal::sfs {
diff --git a/src/rgw/driver/sfs/sfs_lc.cc b/src/rgw/driver/sfs/sfs_lc.cc
index 58510d4685901..fb25a36e5b4f9 100644
--- a/src/rgw/driver/sfs/sfs_lc.cc
+++ b/src/rgw/driver/sfs/sfs_lc.cc
@@ -19,7 +19,7 @@
 
 #include "sqlite/sqlite_lifecycle.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 namespace rgw::sal::sfs {
diff --git a/src/rgw/driver/sfs/sfs_user.cc b/src/rgw/driver/sfs/sfs_user.cc
index 5d22ecbb5365f..2214b737b9bcc 100644
--- a/src/rgw/driver/sfs/sfs_user.cc
+++ b/src/rgw/driver/sfs/sfs_user.cc
@@ -14,7 +14,7 @@
 #include "rgw/driver/sfs/sqlite/sqlite_users.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/sqlite/dbconn.cc b/src/rgw/driver/sfs/sqlite/dbconn.cc
index c747fa181a010..6699f8aef8abf 100644
--- a/src/rgw/driver/sfs/sqlite/dbconn.cc
+++ b/src/rgw/driver/sfs/sqlite/dbconn.cc
@@ -20,7 +20,7 @@
 
 #include "common/dout.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 namespace fs = std::filesystem;
 namespace orm = sqlite_orm;
diff --git a/src/rgw/driver/sfs/types.cc b/src/rgw/driver/sfs/types.cc
index 62fe1060da8d0..3f4bee8207259 100644
--- a/src/rgw/driver/sfs/types.cc
+++ b/src/rgw/driver/sfs/types.cc
@@ -28,7 +28,8 @@
 #include "rgw_obj_types.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
+
 namespace rgw::sal::sfs {
 
 std::string generate_new_version_id(CephContext* ceph_context) {
diff --git a/src/rgw/driver/sfs/user.cc b/src/rgw/driver/sfs/user.cc
index c07d6ad89bf5e..79d4e50aaca2b 100644
--- a/src/rgw/driver/sfs/user.cc
+++ b/src/rgw/driver/sfs/user.cc
@@ -19,7 +19,7 @@
 #include "rgw/driver/sfs/sqlite/sqlite_users.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/writer.cc b/src/rgw/driver/sfs/writer.cc
index f8486e1a3c028..d9d153b9de6fa 100644
--- a/src/rgw/driver/sfs/writer.cc
+++ b/src/rgw/driver/sfs/writer.cc
@@ -33,7 +33,7 @@
 #include "rgw_sal.h"
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/driver/sfs/zone.cc b/src/rgw/driver/sfs/zone.cc
index 1ce6134a9b76a..befab23d8d5bf 100644
--- a/src/rgw/driver/sfs/zone.cc
+++ b/src/rgw/driver/sfs/zone.cc
@@ -15,7 +15,7 @@
 
 #include "rgw_sal_sfs.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
diff --git a/src/rgw/rgw_sal_sfs.cc b/src/rgw/rgw_sal_sfs.cc
index 03586b2ed44cc..584cf380e8e8a 100644
--- a/src/rgw/rgw_sal_sfs.cc
+++ b/src/rgw/rgw_sal_sfs.cc
@@ -59,7 +59,7 @@
 #include "services/svc_zone.h"
 #include "services/svc_zone_utils.h"
 
-#define dout_subsys ceph_subsys_rgw
+#define dout_subsys ceph_subsys_rgw_sfs
 
 using namespace std;
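The switch is mechanical: each translation unit's `dout_subsys` (or
`sfs_dout_subsys`, in `sfs_gc.h`) now names `ceph_subsys_rgw_sfs`, so every
`ldout`/`ldpp_dout` statement in the driver is gated by the `rgw_sfs` levels
instead of the `rgw` ones. As a rough mental model only — this is a
simplified, self-contained sketch, not Ceph's actual `common/dout.h`
machinery — the gating behaves like this:

    // Simplified model of per-subsystem log gating. All names here are
    // illustrative except dout_subsys, which mirrors the define the patch
    // changes in each file.
    #include <iostream>

    enum Subsys { subsys_rgw, subsys_rgw_sfs };
    int gather_level[] = {1, 1};  // e.g. from 'debug rgw' / 'debug rgw sfs'

    #define dout_subsys subsys_rgw_sfs
    #define ldout_model(lvl) \
      if ((lvl) <= gather_level[dout_subsys]) std::cerr

    int main() {
      gather_level[subsys_rgw_sfs] = 10;  // raise SFS verbosity only
      ldout_model(10) << "sfs: visible without touching debug_rgw\n";
    }

Before this patch the same statements were gated by `debug rgw`, so turning
SFS diagnostics on meant turning all of RGW's on as well.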
From 03c867016b50f9d9a474d3d415fada53534bc273 Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Wed, 25 Oct 2023 21:54:28 +0000
Subject: [PATCH 3/5] .clang-format: allow aligning consecutive macros

Signed-off-by: Joao Eduardo Luis
---
 .clang-format | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.clang-format b/.clang-format
index c2c395c6d5f00..01c62f4ace829 100644
--- a/.clang-format
+++ b/.clang-format
@@ -24,7 +24,7 @@ AlignConsecutiveDeclarations:
   AlignCompound: false
   PadOperators: false
 AlignConsecutiveMacros:
-  Enabled: false
+  Enabled: true
   AcrossEmptyLines: false
   AcrossComments: false
   AlignCompound: false

From 49247c02cd92901916274dbe4f298a089cb33b74 Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Wed, 25 Oct 2023 22:20:05 +0000
Subject: [PATCH 4/5] rgw/sfs: make clang-tidy happy on header recursion

When building a preamble, clang-tidy really dislikes header recursion
without '#pragma once'; it does not appear to honor the include guards.

Signed-off-by: Joao Eduardo Luis
---
 src/rgw/driver/sfs/bucket.h | 2 ++
 src/rgw/driver/sfs/object.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/src/rgw/driver/sfs/bucket.h b/src/rgw/driver/sfs/bucket.h
index f970e426af499..dca868e5a1c85 100644
--- a/src/rgw/driver/sfs/bucket.h
+++ b/src/rgw/driver/sfs/bucket.h
@@ -14,6 +14,8 @@
 #ifndef RGW_STORE_SFS_BUCKET_H
 #define RGW_STORE_SFS_BUCKET_H
 
+#pragma once
+
 #include
 #include
 #include
diff --git a/src/rgw/driver/sfs/object.h b/src/rgw/driver/sfs/object.h
index 3e5e66b7bcee9..3fec31632abfd 100644
--- a/src/rgw/driver/sfs/object.h
+++ b/src/rgw/driver/sfs/object.h
@@ -14,6 +14,8 @@
 #ifndef RGW_STORE_SFS_OBJECT_H
 #define RGW_STORE_SFS_OBJECT_H
 
+#pragma once
+
 #include
 #include "rgw/driver/sfs/bucket.h"
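The `.clang-format` tweak above is preparation for the final patch in the
series: with `AlignConsecutiveMacros` enabled, clang-format pads the values
of a consecutive run of `#define`s into a column, which is what keeps the
log-level table introduced in `sfs_log.h` readable. Illustratively (the
macro names here are hypothetical):

    // Enabled: false — each define keeps whatever spacing was written:
    #define SFS_EXAMPLE_A 1
    #define SFS_EXAMPLE_LONG_NAME 10
    #define SFS_EXAMPLE_B 2

    // Enabled: true — values in the run are aligned:
    #define SFS_EXAMPLE_A         1
    #define SFS_EXAMPLE_LONG_NAME 10
    #define SFS_EXAMPLE_B         2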
dendl; // possibly a copy, return a placeholder @@ -106,7 +107,7 @@ int SFSBucket::verify_list_params( // allow unordered is a ceph extension intended to improve performance // of list() by not sorting through results from all over the cluster lsfs_dout( - dpp, 10 + dpp, SFS_LOG_VERBOSE ) << "unsupported allow unordered list requested. returning ordered result." << get_name() << dendl; @@ -117,8 +118,8 @@ int SFSBucket::verify_list_params( } } if (!params.end_marker.empty()) { - lsfs_dout(dpp, 2) << "unsupported end marker (SWIFT) requested " - << get_name() << dendl; + lsfs_verb(dpp) << "unsupported end marker (SWIFT) requested " << get_name() + << dendl; return -ENOTSUP; } if (!params.ns.empty() && params.ns != RGW_OBJ_NS_MULTIPART) { @@ -152,13 +153,12 @@ int SFSBucket::list( const DoutPrefixProvider* dpp, ListParams& params, int max, ListResults& results, optional_yield /* y */ ) { - lsfs_dout(dpp, 10) - << fmt::format( - "listing bucket {} {} {}: max:{} params:", get_name(), - params.ns == RGW_OBJ_NS_MULTIPART ? "multipart" : "", - params.list_versions ? "versions" : "objects", max - ) - << params << dendl; + lsfs_debug(dpp) << fmt::format( + "listing bucket {} {} {}: max:{} params:", get_name(), + params.ns == RGW_OBJ_NS_MULTIPART ? "multipart" : "", + params.list_versions ? "versions" : "objects", max + ) + << params << dendl; const int list_params_ok = verify_list_params(dpp, params, max); if (list_params_ok < 0) { @@ -186,15 +186,15 @@ int SFSBucket::list( e.meta.mtime = mp.mtime; results.objs.emplace_back(e); } - lsfs_dout(dpp, 10) << fmt::format( - "success (prefix:{}, start_after:{}, " - "max:{}). #objs_returned:{} " - "next:{} have_more:{}", - params.prefix, params.marker.name, max, - params.delim, results.objs.size(), - results.next_marker, results.is_truncated - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "success (prefix:{}, start_after:{}, " + "max:{}). #objs_returned:{} " + "next:{} have_more:{}", + params.prefix, params.marker.name, max, params.delim, + results.objs.size(), results.next_marker, + results.is_truncated + ) + << dendl; return 0; } @@ -231,14 +231,13 @@ int SFSBucket::list( } }(); if (!listing_succeeded) { - lsfs_dout(dpp, 10) << fmt::format( - "list (prefix:{}, start_after:{}, " - "max:{}) failed.", - params.prefix, start_with, max, - results.objs.size(), results.next_marker, - results.is_truncated - ) - << dendl; + lsfs_info(dpp) << fmt::format( + "list (prefix:{}, start_after:{}, " + "max:{}) failed.", + params.prefix, start_with, max, results.objs.size(), + results.next_marker, results.is_truncated + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -262,14 +261,13 @@ int SFSBucket::list( list.objects(get_bucket_id(), params.prefix, query, 1, objects_after); results.is_truncated = objects_after.size() > 0; } - lsfs_dout(dpp, 10) << fmt::format( - "common prefix rollup #objs:{} -> #objs:{}, " - "#prefix:{}, more:{}", - results.objs.size(), new_results.size(), - results.common_prefixes.size(), - results.is_truncated - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "common prefix rollup #objs:{} -> #objs:{}, " + "#prefix:{}, more:{}", + results.objs.size(), new_results.size(), + results.common_prefixes.size(), results.is_truncated + ) + << dendl; results.objs = new_results; } @@ -296,7 +294,7 @@ int SFSBucket::list( } } - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "success (prefix:{}, start_after:{}, " "max:{} delim:{}). 
#objs_returned:{} " @@ -322,11 +320,11 @@ int SFSBucket::remove_bucket( auto res = sfs::SFSMultipartUploadV2::abort_multiparts(dpp, store, this); if (res < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "unable to abort multiparts on bucket {}: {}", - get_name(), res - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to abort multiparts on bucket {}: {}", + get_name(), res + ) + << dendl; if (res == -ERR_NO_SUCH_BUCKET) { return -ENOENT; } @@ -337,8 +335,7 @@ int SFSBucket::remove_bucket( sfs::sqlite::SQLiteBuckets db_buckets(store->db_conn); auto db_bucket = db_buckets.get_bucket(get_bucket_id()); if (!db_bucket.has_value()) { - ldpp_dout(dpp, 1) << __func__ << ": Bucket metadata was not found.." - << dendl; + lsfs_verb(dpp) << __func__ << ": Bucket metadata was not found." << dendl; return -ENOENT; } db_bucket->deleted = true; @@ -352,7 +349,7 @@ int SFSBucket::remove_bucket_bypass_gc( optional_yield /*y*/, const DoutPrefixProvider* dpp ) { /** Remove this bucket, bypassing garbage collection. May be removed */ - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -383,12 +380,12 @@ int SFSBucket::set_acl( int SFSBucket::chown( const DoutPrefixProvider* dpp, User& /*new_user*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } bool SFSBucket::is_owner(User* /*user*/) { - ldout(store->ceph_context(), 10) << __func__ << ": TODO" << dendl; + ldout(store->ceph_context(), SFS_LOG_WARN) << __func__ << ": TODO" << dendl; return true; } @@ -398,7 +395,7 @@ int SFSBucket:: // check if there are still objects owned by the bucket sfs::sqlite::SQLiteBuckets db_buckets(store->db_conn); if (!db_buckets.bucket_empty(get_bucket_id())) { - ldpp_dout(dpp, -1) << __func__ << ": Bucket Not Empty.." << dendl; + lsfs_debug(dpp) << __func__ << ": Bucket Not Empty." << dendl; return -ENOTEMPTY; } return 0; @@ -456,7 +453,7 @@ std::unique_ptr SFSBucket::get_multipart_upload( const std::string& with_oid, std::optional with_upload_id, ACLOwner with_owner, ceph::real_time with_mtime ) { - ldout(store->ceph_context(), 10) + ldout(store->ceph_context(), SFS_LOG_DEBUG) << "bucket::" << __func__ << ": oid: " << with_oid << ", upload id: " << with_upload_id << dendl; @@ -470,7 +467,7 @@ std::unique_ptr SFSBucket::get_multipart_upload( try_resolve_mp_from_oid( store->db_conn, with_oid, next_oid, next_upload_id )) { - ldout(store->ceph_context(), 20) + ldout(store->ceph_context(), SFS_LOG_DEBUG) << fmt::format( "called without upload_id. 
resolved oid {} to MP oid:{} " "upload:{}", @@ -511,12 +508,11 @@ int SFSBucket::list_multiparts( std::vector>& uploads, std::map* common_prefixes, bool* is_truncated ) { - lsfs_dout(dpp, 10) - << fmt::format( - "prefix: {}, marker: {}, delim: {}, max_uploads: {}", prefix, - marker, delim, max_uploads - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "prefix: {}, marker: {}, delim: {}, max_uploads: {}", + prefix, marker, delim, max_uploads + ) + << dendl; return sfs::SFSMultipartUploadV2::list_multiparts( dpp, store, this, bucket, prefix, marker, delim, max_uploads, uploads, @@ -527,8 +523,7 @@ int SFSBucket::list_multiparts( int SFSBucket::abort_multiparts( const DoutPrefixProvider* dpp, CephContext* /*cct*/ ) { - lsfs_dout( - dpp, 10 + lsfs_debug(dpp ) << fmt::format("aborting multipart uploads on bucket {}", get_name()) << dendl; return sfs::SFSMultipartUploadV2::abort_multiparts(dpp, store, this); @@ -537,7 +532,7 @@ int SFSBucket::abort_multiparts( int SFSBucket::try_refresh_info( const DoutPrefixProvider* dpp, ceph::real_time* /*pmtime*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -547,19 +542,19 @@ int SFSBucket::read_usage( RGWUsageIter& /*usage_iter*/, std::map& /*usage*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } int SFSBucket::trim_usage( const DoutPrefixProvider* dpp, uint64_t /*start_epoch*/, uint64_t /*end_epoch*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } int SFSBucket::rebuild_index(const DoutPrefixProvider* dpp) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -567,13 +562,13 @@ int SFSBucket::check_quota( const DoutPrefixProvider* dpp, RGWQuota& quota, uint64_t obj_size, optional_yield /*y*/, bool /*check_size_only*/ ) { - ldpp_dout(dpp, 10) << __func__ - << ": user(max size: " << quota.user_quota.max_size - << ", max objs: " << quota.user_quota.max_objects - << "), bucket(max size: " << quota.bucket_quota.max_size - << ", max objs: " << quota.bucket_quota.max_objects - << "), obj size: " << obj_size << dendl; - ldpp_dout(dpp, 10) << __func__ << ": not implemented, return okay." << dendl; + lsfs_debug(dpp) << __func__ + << ": user(max size: " << quota.user_quota.max_size + << ", max objs: " << quota.user_quota.max_objects + << "), bucket(max size: " << quota.bucket_quota.max_size + << ", max objs: " << quota.bucket_quota.max_objects + << "), obj size: " << obj_size << dendl; + lsfs_warn(dpp) << __func__ << ": not implemented, return okay." 
<< dendl; return 0; } @@ -601,36 +596,36 @@ int SFSBucket::sync_user_stats( } int SFSBucket::update_container_stats(const DoutPrefixProvider* dpp) { - lsfs_dout(dpp, 10) << fmt::format( - "update bucket {} (id {}) stats", get_name(), - get_bucket_id() - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "update bucket {} (id {}) stats", get_name(), + get_bucket_id() + ) + << dendl; sfs::sqlite::SQLiteBuckets bucketdb(store->db_conn); auto stats = bucketdb.get_stats(get_bucket_id()); if (!stats.has_value()) { - lsfs_dout(dpp, 10) << fmt::format( - "unable to obtain stats for bucket {} (id {}) -- " - "no such bucket!", - get_name(), get_bucket_id() - ) - << dendl; + lsfs_verb(dpp) << fmt::format( + "unable to obtain stats for bucket {} (id {}) -- " + "no such bucket!", + get_name(), get_bucket_id() + ) + << dendl; return -ERR_NO_SUCH_BUCKET; } - lsfs_dout(dpp, 10) << fmt::format( - "bucket {} stats: size: {}, obj_cnt: {}", - get_name(), stats->size, stats->obj_count - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "bucket {} stats: size: {}, obj_cnt: {}", get_name(), + stats->size, stats->obj_count + ) + << dendl; ent.size = ent.size_rounded = stats->size; ent.count = stats->obj_count; return 0; } int SFSBucket::check_bucket_shards(const DoutPrefixProvider* dpp) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } int SFSBucket::put_info( diff --git a/src/rgw/driver/sfs/multipart.cc b/src/rgw/driver/sfs/multipart.cc index 2e9d2ecbd7cac..c6a9f396943ce 100644 --- a/src/rgw/driver/sfs/multipart.cc +++ b/src/rgw/driver/sfs/multipart.cc @@ -22,6 +22,7 @@ #include "rgw/driver/sfs/fmt.h" #include "rgw/driver/sfs/multipart_types.h" +#include "rgw/driver/sfs/sfs_log.h" #include "rgw/driver/sfs/sqlite/buckets/multipart_definitions.h" #include "rgw_obj_manifest.h" #include "rgw_sal_sfs.h" @@ -74,20 +75,19 @@ int SFSMultipartUploadV2::init( const DoutPrefixProvider* dpp, optional_yield /*y*/, ACLOwner& acl_owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs ) { - lsfs_dout(dpp, 10) << "upload_id: " << upload_id << ", oid: " << get_key() - << ", meta: " << meta_str - << ", owner: " << acl_owner.get_display_name() - << ", attrs: " << attrs << dendl; + lsfs_debug(dpp) << "upload_id: " << upload_id << ", oid: " << get_key() + << ", meta: " << meta_str + << ", owner: " << acl_owner.get_display_name() + << ", attrs: " << attrs << dendl; sfs::sqlite::SQLiteMultipart mpdb(store->db_conn); auto mp = mpdb.get_multipart(upload_id); if (mp.has_value()) { - lsfs_dout(dpp, -1) - << fmt::format( - "BUG: upload already exists, upload_id: {}, oid: {}", upload_id, - get_key() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "BUG: upload already exists, upload_id: {}, oid: {}", + upload_id, get_key() + ) + << dendl; // upload exists and has been initiated, return error. return -ERR_INTERNAL_ERROR; } @@ -116,7 +116,7 @@ int SFSMultipartUploadV2::init( auto id = mpdb.insert(mpop); ceph_assert(id > 0); } catch (std::system_error& e) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "BUG: upload already exists, raced! 
upload_id: {}, oid: {}", upload_id, get_key() @@ -124,13 +124,12 @@ int SFSMultipartUploadV2::init( << dendl; return -ERR_INTERNAL_ERROR; } - lsfs_dout(dpp, 10) - << fmt::format( - "created multipart upload_id: {}, oid: {}, owner: {}", upload_id, - get_key(), acl_owner.get_display_name() - ) - << dendl; - lsfs_dout(dpp, 10) << "attrs: " << attrs << dendl; + lsfs_debug(dpp) << fmt::format( + "created multipart upload_id: {}, oid: {}, owner: {}", + upload_id, get_key(), acl_owner.get_display_name() + ) + << dendl; + lsfs_trace(dpp) << "attrs: " << attrs << dendl; // set placement in case we get an info request placement = dest_placement; @@ -141,8 +140,8 @@ int SFSMultipartUploadV2::list_parts( const DoutPrefixProvider* dpp, CephContext* /*cct*/, int num_parts, int marker, int* next_marker, bool* truncated, bool /*assume_unsorted*/ ) { - lsfs_dout(dpp, 10) << "num_parts: " << num_parts << ", marker: " << marker - << dendl; + lsfs_debug(dpp) << "num_parts: " << num_parts << ", marker: " << marker + << dendl; ceph_assert(marker >= 0); ceph_assert(num_parts >= 0); @@ -165,13 +164,13 @@ int SFSMultipartUploadV2::list_parts( int SFSMultipartUploadV2::abort( const DoutPrefixProvider* dpp, CephContext* /*cct*/ ) { - lsfs_dout(dpp, 10) << "upload_id: " << upload_id << dendl; + lsfs_debug(dpp) << "upload_id: " << upload_id << dendl; sfs::sqlite::SQLiteMultipart mpdb(store->db_conn); auto res = mpdb.abort(upload_id); - lsfs_dout(dpp, 10) << "upload_id: " << upload_id << ", aborted: " << res - << dendl; + lsfs_debug(dpp) << "upload_id: " << upload_id << ", aborted: " << res + << dendl; return (res ? 0 : -ERR_NO_SUCH_UPLOAD); } @@ -184,29 +183,29 @@ int SFSMultipartUploadV2::complete( std::string& tag, ACLOwner& acl_owner, uint64_t olh_epoch, rgw::sal::Object* target_obj ) { - lsfs_dout(dpp, 10) << fmt::format( - "upload_id: {}, accounted_size: {}, tag: {}, " - "owner: {}, olh_epoch: {}" - ", target_obj: {}", - upload_id, accounted_size, tag, - acl_owner.get_display_name(), olh_epoch, - target_obj->get_key() - ) - << dendl; - lsfs_dout(dpp, 10) << "part_etags: " << part_etags << dendl; + lsfs_debug(dpp) << fmt::format( + "upload_id: {}, accounted_size: {}, tag: {}, " + "owner: {}, olh_epoch: {}" + ", target_obj: {}", + upload_id, accounted_size, tag, + acl_owner.get_display_name(), olh_epoch, + target_obj->get_key() + ) + << dendl; + lsfs_debug(dpp) << "part_etags: " << part_etags << dendl; sfs::sqlite::SQLiteMultipart mpdb(store->db_conn); bool duplicate = false; auto res = mpdb.mark_complete(upload_id, &duplicate); if (!res) { - lsfs_dout(dpp, 10) << fmt::format( - "unable to find on-going multipart upload id {}", - upload_id - ) - << dendl; + lsfs_verb(dpp) << fmt::format( + "unable to find on-going multipart upload id {}", + upload_id + ) + << dendl; return -ERR_NO_SUCH_UPLOAD; } else if (duplicate) { - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "multipart id {} already completed, returning success!", upload_id @@ -248,23 +247,20 @@ int SFSMultipartUploadV2::complete( auto p = parts_map.find(k); if (p == parts_map.end()) { - lsfs_dout(dpp, 1) << fmt::format( - "client-specified part {} does not exist!", k - ) - << dendl; + lsfs_verb(dpp + ) << fmt::format("client-specified part {} does not exist!", k) + << dendl; return -ERR_INVALID_PART; } auto part = p->second; if (!part.is_finished()) { - lsfs_dout( - dpp, 1 + lsfs_verb(dpp ) << fmt::format("client-specified part {} is not finished yet!", k) << dendl; return -ERR_INVALID_PART; } else if (!part.etag.has_value()) { - lsfs_dout( - dpp, -1 
+ lsfs_err(dpp ) << fmt::format("BUG: Part {} is finished and should have an etag!", k) << dendl; return -ERR_INTERNAL_ERROR; @@ -274,7 +270,7 @@ int SFSMultipartUploadV2::complete( auto part_etag = part.etag.value(); auto etag = rgw_string_unquote(v); if (part_etag != etag) { - lsfs_dout(dpp, 1) + lsfs_info(dpp) << fmt::format( "client-specified part {} etag mismatch; expected {}, got {}", k, part_etag, etag @@ -286,10 +282,9 @@ int SFSMultipartUploadV2::complete( if ((part.size < 5 * 1024 * 1024) && (std::distance(it, part_etags.cend()) > 1)) { - lsfs_dout(dpp, 1) << fmt::format( - "part {} is too small and not the last part!", k - ) - << dendl; + lsfs_debug(dpp + ) << fmt::format("part {} is too small and not the last part!", k) + << dendl; return -ERR_TOO_SMALL; } @@ -297,25 +292,24 @@ int SFSMultipartUploadV2::complete( } if (store->filesystem_stats_avail_bytes.load() < expected_size) { - lsfs_dout(dpp, -1) << fmt::format( - "filesystem stat reservation check hit. " - "avail_bytes: {}, avail_pct: " - "{}, total_bytes: {}, expected size: {}", - store->filesystem_stats_avail_bytes, - store->filesystem_stats_avail_percent, - store->filesystem_stats_total_bytes, expected_size - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "filesystem stat reservation check hit. " + "avail_bytes: {}, avail_pct: " + "{}, total_bytes: {}, expected size: {}", + store->filesystem_stats_avail_bytes, + store->filesystem_stats_avail_percent, + store->filesystem_stats_total_bytes, expected_size + ) + << dendl; return -ERR_QUOTA_EXCEEDED; } // calculate final etag std::string etag = fmt::format("{}-{}", hash.final(), part_etags.size()); - lsfs_dout(dpp, 10) << fmt::format( - "upload_id: {}, final etag: {}", upload_id, etag - ) - << dendl; + lsfs_debug(dpp + ) << fmt::format("upload_id: {}, final etag: {}", upload_id, etag) + << dendl; // start aggregating final object res = mpdb.mark_aggregating(upload_id); @@ -329,7 +323,7 @@ int SFSMultipartUploadV2::complete( std::error_code ec; std::filesystem::create_directories(objpath.parent_path(), ec); if (ec) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "failed to create directories for temp mp object {}: {}", objpath, ec.message() @@ -339,11 +333,11 @@ int SFSMultipartUploadV2::complete( } int objfd = ::open(objpath.c_str(), O_WRONLY | O_BINARY | O_CREAT, 0600); if (objfd < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "unable to open object file {} to write: {}", - objpath, cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to open object file {} to write: {}", objpath, + cpp_strerror(errno) + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -356,27 +350,27 @@ int SFSMultipartUploadV2::complete( ceph_assert(std::filesystem::exists(path)); auto partsize = std::filesystem::file_size(path); if (partsize != part.size) { - lsfs_dout(dpp, 1) << fmt::format( - "part size mismatch, expected {}, found: {}", - part.size, partsize - ) - << dendl; + lsfs_info(dpp) << fmt::format( + "part size mismatch, expected {}, found: {}", + part.size, partsize + ) + << dendl; return -ERR_INVALID_PART; } int partfd = ::open(path.c_str(), O_RDONLY | O_BINARY); if (partfd < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "unable to open part file {} for reading: {}", - path, cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to open part file {} for reading: {}", path, + cpp_strerror(errno) + ) + << dendl; return -ERR_INTERNAL_ERROR; } int ret = ::copy_file_range(partfd, NULL, objfd, NULL, partsize, 0); if (ret < 0) { // this is an 
unexpected error, we don't know how to recover from it. - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "unable to copy part {} (fd {}) to object file {} (fd {}): {}", part.part_num, partfd, objpath, objfd, cpp_strerror(errno) @@ -387,44 +381,44 @@ int SFSMultipartUploadV2::complete( accounted_bytes += partsize; ret = ::fsync(objfd); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed fsync fd: {}, on obj file: {}: {}", - objfd, objpath, cpp_strerror(ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed fsync fd: {}, on obj file: {}: {}", objfd, + objpath, cpp_strerror(ret) + ) + << dendl; ceph_abort_msg("Unexpected error fsync'ing obj path"); } ret = ::close(partfd); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed closing fd: {}, on part file: {}: {}", - partfd, path, cpp_strerror(ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed closing fd: {}, on part file: {}: {}", + partfd, path, cpp_strerror(ret) + ) + << dendl; ceph_abort_msg("Unexpected error on closing part path"); } } int ret = ::close(objfd); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed closing fd: {}, on obj file: {}: {}", - objfd, objpath, cpp_strerror(ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed closing fd: {}, on obj file: {}: {}", objfd, + objpath, cpp_strerror(ret) + ) + << dendl; } auto final_obj_size = std::filesystem::file_size(objpath); if (accounted_bytes != final_obj_size) { // this is an unexpected error, probably a bug - die. - lsfs_dout(dpp, -1) << fmt::format( - "BUG: expected {} bytes, found {} bytes", - accounted_bytes, final_obj_size - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "BUG: expected {} bytes, found {} bytes", + accounted_bytes, final_obj_size + ) + << dendl; ceph_abort_msg("BUG: on final object for multipart upload!"); } - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "finished building final object file at {}, size: {}, etag: {}", objpath, final_obj_size, etag @@ -441,7 +435,7 @@ int SFSMultipartUploadV2::complete( try { objref = bucketref->create_version(target_obj->get_key()); } catch (const std::system_error& e) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "error while fetching obj ref from bucket: {}, oid: {}: {}", bucketref->get_bucket_id(), mp->object_name, e.what() @@ -450,14 +444,13 @@ int SFSMultipartUploadV2::complete( return -ERR_INTERNAL_ERROR; } auto destpath = store->get_data_path() / objref->get_storage_path(); - lsfs_dout( - dpp, 10 + lsfs_debug(dpp ) << fmt::format("moving final object from {} to {}", objpath, destpath) << dendl; std::filesystem::create_directories(destpath.parent_path(), ec); if (ec) { - lsfs_dout(dpp, -1) + lsfs_debug(dpp) << fmt::format( "failed to create directories for destination object {}: {}", destpath, ec.message() @@ -469,11 +462,11 @@ int SFSMultipartUploadV2::complete( ec.clear(); ret = ::rename(objpath.c_str(), destpath.c_str()); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed to rename object file from {} to {}: {}", - objpath, destpath, cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to rename object file from {} to {}: {}", + objpath, destpath, cpp_strerror(errno) + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -508,11 +501,11 @@ int SFSMultipartUploadV2::complete( try { objref->metadata_finish(store, bucketref->get_info().versioning_enabled()); } catch (const std::system_error& e) { - lsfs_dout(dpp, -1) << fmt::format( - "failed to update db object {}: {}", objref->name, - e.what() - ) - << dendl; + 
lsfs_err(dpp) << fmt::format( + "failed to update db object {}: {}", objref->name, + e.what() + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -527,27 +520,23 @@ int SFSMultipartUploadV2::get_info( const DoutPrefixProvider* dpp, optional_yield /*y*/, rgw_placement_rule** rule, rgw::sal::Attrs* attrs ) { - lsfs_dout(dpp, 10) << fmt::format( - "upload_id: {}, obj: {}", upload_id, get_key() - ) - << dendl; + lsfs_debug(dpp) << fmt::format("upload_id: {}, obj: {}", upload_id, get_key()) + << dendl; sfs::sqlite::SQLiteMultipart mpdb(store->db_conn); auto mp = mpdb.get_multipart(upload_id); if (!mp.has_value()) { - lsfs_dout(dpp, 10) << fmt::format( - "unable to find upload_id: {} in db", upload_id - ) - << dendl; + lsfs_debug(dpp + ) << fmt::format("unable to find upload_id: {} in db", upload_id) + << dendl; return -ERR_NO_SUCH_UPLOAD; } if (mp->state != MultipartState::INIT && mp->state != MultipartState::INPROGRESS) { - lsfs_dout(dpp, 10) << fmt::format( - "upload id {} not in available state", upload_id - ) - << dendl; + lsfs_debug(dpp + ) << fmt::format("upload id {} not in available state", upload_id) + << dendl; return -ERR_NO_SUCH_UPLOAD; } @@ -575,12 +564,12 @@ std::unique_ptr SFSMultipartUploadV2::get_writer( ceph_assert(part_num <= 10000); uint32_t pnum = static_cast(part_num); - lsfs_dout(dpp, 10) - << fmt::format( - "head_obj: {}, owner: {}, upload_id: {}, part_num: {}", - head_obj->get_key().name, writer_owner.id, upload_id, pnum - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "head_obj: {}, owner: {}, upload_id: {}, part_num: {}", + head_obj->get_key().name, writer_owner.id, upload_id, + pnum + ) + << dendl; return std::make_unique(dpp, y, upload_id, store, pnum); } @@ -594,7 +583,7 @@ int SFSMultipartUploadV2::list_multiparts( ) { auto cls = SFSMultipartUploadV2::get_cls_name(); auto bucket_name = bucket->get_name(); - lsfs_dout_for(dpp, 10, cls) + lsfs_debug_for(dpp, cls) << fmt::format( "bucket: {}, prefix: {}, marker: {}, delim: {}, max_uploads: {}", bucket_name, prefix, marker, delim, max_uploads @@ -606,12 +595,12 @@ int SFSMultipartUploadV2::list_multiparts( bucket_name, prefix, marker, delim, max_uploads, is_truncated ); if (!entries.has_value()) { - lsfs_dout_for(dpp, -1, cls) << fmt::format( - "unable to find multipart uploads for " - "bucket {} -- bucket not found!", - bucket_name - ) - << dendl; + lsfs_verb_for(dpp, cls) << fmt::format( + "unable to find multipart uploads for " + "bucket {} -- bucket not found!", + bucket_name + ) + << dendl; return -ERR_NO_SUCH_BUCKET; } @@ -621,15 +610,17 @@ int SFSMultipartUploadV2::list_multiparts( store, bucket, bucketref, entry.upload_id, entry.object_name, entry.owner_id, entry.mtime )); - lsfs_dout_for(dpp, 10, cls) + lsfs_debug_for(dpp, cls) << fmt::format( "found multipart upload id: {}, bucket: {}, obj: {}", entry.upload_id, bucket->get_key().name, entry.object_name ) << dendl; } - lsfs_dout_for(dpp, 10, cls) - << fmt::format("found {} multipart uploads", uploads.size()) << dendl; + lsfs_debug_for(dpp, cls) << fmt::format( + "found {} multipart uploads", uploads.size() + ) + << dendl; return 0; } @@ -639,26 +630,24 @@ int SFSMultipartUploadV2::abort_multiparts( ) { auto cls = SFSMultipartUploadV2::get_cls_name(); auto bucket_name = bucket->get_name(); - lsfs_dout_for(dpp, 10, cls) - << fmt::format("bucket: {}", bucket_name) << dendl; + lsfs_debug_for(dpp, cls) << fmt::format("bucket: {}", bucket_name) << dendl; sqlite::SQLiteMultipart mpdb(store->db_conn); auto num_aborted = mpdb.abort_multiparts(bucket_name); if 
(num_aborted < 0) { - lsfs_dout_for(dpp, -1, cls) << fmt::format( - "error aborting multipart uploads on " - "bucket {} -- bucket not found!", - bucket_name - ) - << dendl; + lsfs_verb_for(dpp, cls) << fmt::format( + "error aborting multipart uploads on " + "bucket {} -- bucket not found!", + bucket_name + ) + << dendl; return -ERR_NO_SUCH_BUCKET; } - lsfs_dout_for(dpp, 10, cls) - << fmt::format( - "aborted {} multipart uploads on bucket {}", num_aborted, - bucket_name - ) - << dendl; + lsfs_debug_for(dpp, cls) << fmt::format( + "aborted {} multipart uploads on bucket {}", + num_aborted, bucket_name + ) + << dendl; return 0; } diff --git a/src/rgw/driver/sfs/object.cc b/src/rgw/driver/sfs/object.cc index 48d914580d3f7..6a55441622c4e 100644 --- a/src/rgw/driver/sfs/object.cc +++ b/src/rgw/driver/sfs/object.cc @@ -17,6 +17,7 @@ #include #include "driver/sfs/multipart.h" +#include "driver/sfs/sfs_log.h" #include "driver/sfs/sqlite/sqlite_versioned_objects.h" #include "driver/sfs/types.h" #include "rgw_common.h" @@ -58,26 +59,26 @@ int SFSObject::SFSReadOp::handle_conditionals(const DoutPrefixProvider* dpp if (params.if_match) { const std::string match = rgw_string_unquote(params.if_match); result = (etag == match) ? 0 : -ERR_PRECONDITION_FAILED; - ldpp_dout(dpp, 10) << fmt::format( - "If-Match: etag={} vs. ifmatch={}: {}", etag, - match, result - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "If-Match: etag={} vs. ifmatch={}: {}", etag, match, + result + ) + << dendl; } if (params.if_nomatch) { const std::string match = rgw_string_unquote(params.if_nomatch); result = (etag == match) ? -ERR_NOT_MODIFIED : 0; - ldpp_dout(dpp, 10) << fmt::format( - "If-None-Match: etag={} vs. ifmatch={}: {}", etag, - match, result - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "If-None-Match: etag={} vs. ifmatch={}: {}", etag, + match, result + ) + << dendl; } // RFC 7232 3.3. A recipient MUST ignore If-Modified-Since if the // request contains an If-None-Match header field if (params.mod_ptr && !params.if_nomatch) { result = (mtime > *params.mod_ptr) ? 0 : -ERR_NOT_MODIFIED; - ldpp_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "If-Modified-Since: mtime={:%Y-%m-%d %H:%M:%S} vs. " "if_time={:%Y-%m-%d %H:%M:%S}: {}", @@ -90,7 +91,7 @@ int SFSObject::SFSReadOp::handle_conditionals(const DoutPrefixProvider* dpp // request contains an If-Match header field if (params.unmod_ptr && !params.if_match) { result = (mtime < *params.unmod_ptr) ? 0 : -ERR_PRECONDITION_FAILED; - ldpp_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "If-UnModified-Since: mtime={:%Y-%m-%d %H:%M:%S} vs. " "if_time={:%Y-%m-%d %H:%M:%S}: {}", @@ -100,7 +101,7 @@ int SFSObject::SFSReadOp::handle_conditionals(const DoutPrefixProvider* dpp ) << dendl; } - ldpp_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "Conditional GET (Match/NoneMatch/Mod/UnMod) ({}, {}): {}", params.if_match != nullptr, params.if_nomatch != nullptr, @@ -121,11 +122,11 @@ int SFSObject::SFSReadOp::prepare( objdata = source->store->get_data_path() / objref->get_storage_path(); if (!std::filesystem::exists(objdata)) { - lsfs_dout(dpp, 10) << "object data not found at " << objdata << dendl; + lsfs_verb(dpp) << "object data not found at " << objdata << dendl; return -ENOENT; } - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "bucket:{} obj:{} size:{} versionid:{} " "conditionals:(ifmatch:{} ifnomatch:{} ifmod:{} ifunmod:{})", @@ -162,19 +163,18 @@ int SFSObject::SFSReadOp::read( ) { // TODO bounds check, etc. 
const auto len = end + 1 - ofs; - lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() - << ", obj: " << source->get_name() - << ", size: " << source->get_obj_size() - << ", offset: " << ofs << ", end: " << end - << ", len: " << len << dendl; + lsfs_debug(dpp) << "bucket: " << source->bucket->get_name() + << ", obj: " << source->get_name() + << ", size: " << source->get_obj_size() << ", offset: " << ofs + << ", end: " << end << ", len: " << len << dendl; ceph_assert(std::filesystem::exists(objdata)); std::string error; int ret = bl.pread_file(objdata.c_str(), ofs, len, &error); if (ret < 0) { - lsfs_dout(dpp, 10) << "failed to read object from file " << objdata - << ". Returning EIO." << dendl; + lsfs_err(dpp) << "failed to read object from file " << objdata + << ". Returning EIO." << dendl; return -EIO; } return len; @@ -187,11 +187,10 @@ int SFSObject::SFSReadOp::iterate( ) { // TODO bounds check, etc. const auto len = end + 1 - ofs; - lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() - << ", obj: " << source->get_name() - << ", size: " << source->get_obj_size() - << ", offset: " << ofs << ", end: " << end - << ", len: " << len << dendl; + lsfs_debug(dpp) << "bucket: " << source->bucket->get_name() + << ", obj: " << source->get_name() + << ", size: " << source->get_obj_size() << ", offset: " << ofs + << ", end: " << end << ", len: " << len << dendl; ceph_assert(std::filesystem::exists(objdata)); std::string error; @@ -203,17 +202,17 @@ int SFSObject::SFSReadOp::iterate( bufferlist bl; int ret = bl.pread_file(objdata.c_str(), ofs, size, &error); if (ret < 0) { - lsfs_dout(dpp, 0) << "failed to read object from file '" << objdata - << ", offset: " << ofs << ", size: " << size << ": " - << error << dendl; + lsfs_err(dpp) << "failed to read object from file '" << objdata + << ", offset: " << ofs << ", size: " << size << ": " + << error << dendl; return -EIO; } missing -= size; - lsfs_dout(dpp, 10) << "return " << size << "/" << len << ", offset: " << ofs - << ", missing: " << missing << dendl; + lsfs_debug(dpp) << "return " << size << "/" << len << ", offset: " << ofs + << ", missing: " << missing << dendl; ret = cb->handle_data(bl, 0, size); if (ret < 0) { - lsfs_dout(dpp, 0) << "failed to return object data: " << ret << dendl; + lsfs_warn(dpp) << "failed to return object data: " << ret << dendl; return -EIO; } @@ -230,11 +229,11 @@ SFSObject::SFSDeleteOp::SFSDeleteOp( int SFSObject::SFSDeleteOp::delete_obj( const DoutPrefixProvider* dpp, optional_yield /*y*/ ) { - lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() - << " bucket versioning: " - << source->bucket->versioning_enabled() - << ", object: " << source->get_name() - << ", instance: " << source->get_instance() << dendl; + lsfs_debug(dpp) << "bucket: " << source->bucket->get_name() + << " bucket versioning: " + << source->bucket->versioning_enabled() + << ", object: " << source->get_name() + << ", instance: " << source->get_instance() << dendl; // do the quick and dirty thing for now ceph_assert(bucketref); @@ -275,7 +274,7 @@ int SFSObject::SFSDeleteOp::delete_obj( int SFSObject::delete_object( const DoutPrefixProvider* dpp, optional_yield y, bool prevent_versioning ) { - lsfs_dout(dpp, 10) << "prevent_versioning: " << prevent_versioning << dendl; + lsfs_debug(dpp) << "prevent_versioning: " << prevent_versioning << dendl; auto ref = store->get_bucket_ref(get_bucket()->get_name()); SFSObject::SFSDeleteOp del(this, ref); return del.delete_obj(dpp, y); @@ -296,14 +295,14 @@ int SFSObject::copy_object( 
, const DoutPrefixProvider* dpp, optional_yield /*y*/ ) { - lsfs_dout(dpp, 10) << fmt::format( - "bucket:{} obj:{} version:{} size:{} -> bucket:{} " - "obj:{} version:{}", - src_bucket->get_name(), get_name(), get_instance(), - get_obj_size(), dst_bucket->get_name(), - dst_object->get_name(), dst_object->get_instance() - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "bucket:{} obj:{} version:{} size:{} -> bucket:{} " + "obj:{} version:{}", + src_bucket->get_name(), get_name(), get_instance(), + get_obj_size(), dst_bucket->get_name(), + dst_object->get_name(), dst_object->get_instance() + ) + << dendl; refresh_meta(); ceph_assert(objref); @@ -328,12 +327,11 @@ int SFSObject::copy_object( const int src_fd = ::open(srcpath.c_str(), O_RDONLY | O_BINARY); if (src_fd < 0) { - lsfs_dout(dpp, -1) - << fmt::format( - "unable to open src obj {} file {} for reading: {}", - objref->name, srcpath.string(), cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to open src obj {} file {} for reading: {}", + objref->name, srcpath.string(), cpp_strerror(errno) + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -348,12 +346,12 @@ int SFSObject::copy_object( std::error_code ec; std::filesystem::create_directories(dstpath.parent_path(), ec); if (ec) { - lsfs_dout(dpp, -1) - << fmt::format( - "failed to create directory hierarchy {} for {}: {}", - dstpath.parent_path().string(), dstref->name, ec.message() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to create directory hierarchy {} for {}: {}", + dstpath.parent_path().string(), dstref->name, + ec.message() + ) + << dendl; ::close(src_fd); return -ERR_INTERNAL_ERROR; } @@ -362,51 +360,49 @@ int SFSObject::copy_object( const int dst_fd = ::open(dstpath.c_str(), O_WRONLY | O_CREAT | O_EXCL | O_BINARY, 0600); if (dst_fd < 0) { - lsfs_dout(dpp, -1) - << fmt::format( - "unable to open dst obj {} file {} for writing: {}", - dstref->name, dstpath.string(), cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to open dst obj {} file {} for writing: {}", + dstref->name, dstpath.string(), cpp_strerror(errno) + ) + << dendl; ::close(src_fd); return -ERR_INTERNAL_ERROR; } - lsfs_dout(dpp, 10) << fmt::format( - "copying {} fd:{} -> {} fd:{}", srcpath.string(), - src_fd, dstpath.string(), dst_fd - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "copying {} fd:{} -> {} fd:{}", srcpath.string(), + src_fd, dstpath.string(), dst_fd + ) + << dendl; int ret = ::copy_file_range( src_fd, nullptr, dst_fd, nullptr, objref->get_meta().size, 0 ); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed to copy file from {} to {}: {}", - srcpath.string(), dstpath.string(), - cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to copy file from {} to {}: {}", + srcpath.string(), dstpath.string(), cpp_strerror(errno) + ) + << dendl; ::close(src_fd); ::close(dst_fd); return -ERR_INTERNAL_ERROR; } ret = ::close(src_fd); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed closing src fd:{} fn:{}: {}", src_fd, - srcpath.string(), cpp_strerror(ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed closing src fd:{} fn:{}: {}", src_fd, + srcpath.string(), cpp_strerror(ret) + ) + << dendl; } ret = ::close(dst_fd); if (ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed closing dst fd:{} fn:{}: {}", dst_fd, - dstpath.string(), cpp_strerror(ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed closing dst fd:{} fn:{}: {}", dst_fd, + dstpath.string(), cpp_strerror(ret) + 
) + << dendl; } auto dest_meta = objref->get_meta(); @@ -496,7 +492,7 @@ int SFSObject::delete_obj_aio( const DoutPrefixProvider* dpp, RGWObjState* /*astate*/, Completions* /*aio*/, bool /*keep_index_consistent*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -543,7 +539,7 @@ int SFSObject::delete_obj_attrs( std::unique_ptr SFSObject::get_serializer( const DoutPrefixProvider* dpp, const std::string& lock_name ) { - lsfs_dout(dpp, 10) << "lock name: " << lock_name << dendl; + lsfs_debug(dpp) << "lock name: " << lock_name << dendl; return std::make_unique(); } @@ -551,7 +547,7 @@ int SFSObject::transition( Bucket*, const rgw_placement_rule&, const real_time&, uint64_t, const DoutPrefixProvider* dpp, optional_yield ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -560,21 +556,21 @@ int SFSObject::transition_to_cloud( std::set&, CephContext*, bool, const DoutPrefixProvider* dpp, optional_yield ) { - ldpp_dout(dpp, 10) << __func__ << ": not supported" << dendl; + lsfs_warn(dpp) << __func__ << ": not supported" << dendl; return -ENOTSUP; } bool SFSObject::placement_rules_match( rgw_placement_rule& /*r1*/, rgw_placement_rule& /*r2*/ ) { - ldout(store->ceph_context(), 10) << __func__ << ": TODO" << dendl; + ldout(store->ceph_context(), SFS_LOG_WARN) << __func__ << ": TODO" << dendl; return true; } int SFSObject::dump_obj_layout( const DoutPrefixProvider* /*dpp*/, optional_yield /*y*/, Formatter* /*f*/ ) { - ldout(store->ceph_context(), 10) << __func__ << ": TODO" << dendl; + ldout(store->ceph_context(), SFS_LOG_WARN) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -582,14 +578,14 @@ int SFSObject::swift_versioning_restore( bool& /*restored*/, /* out */ const DoutPrefixProvider* dpp ) { - ldpp_dout(dpp, 10) << __func__ << ": do nothing." << dendl; + lsfs_warn(dpp) << __func__ << ": do nothing." << dendl; return 0; } int SFSObject::swift_versioning_copy( const DoutPrefixProvider* dpp, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": do nothing." << dendl; + lsfs_warn(dpp) << __func__ << ": do nothing." 
<< dendl; return 0; } @@ -598,7 +594,7 @@ int SFSObject::omap_get_vals( uint64_t /*count*/, std::map* /*m*/, bool* /*pmore*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -606,7 +602,7 @@ int SFSObject::omap_get_all( const DoutPrefixProvider* dpp, std::map* /*m*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -614,7 +610,7 @@ int SFSObject::omap_get_vals_by_keys( const DoutPrefixProvider* dpp, const std::string& /*oid*/, const std::set& /*keys*/, Attrs* /*vals*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -622,7 +618,7 @@ int SFSObject::omap_set_val_by_key( const DoutPrefixProvider* dpp, const std::string& /*key*/, bufferlist& /*val*/, bool /*must_exist*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -630,7 +626,7 @@ int SFSObject::chown( rgw::sal::User& /*new_user*/, const DoutPrefixProvider* dpp, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -687,35 +683,35 @@ int SFSObject::handle_copy_object_conditionals( if (if_match) { const std::string match = rgw_string_unquote(if_match); result = (etag == match) ? 0 : -ERR_PRECONDITION_FAILED; - ldpp_dout(dpp, 10) << fmt::format( - "If-Match: etag={} vs. ifmatch={}: {}", etag, - match, result - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "If-Match: etag={} vs. ifmatch={}: {}", etag, match, + result + ) + << dendl; } if (if_nomatch) { const std::string match = rgw_string_unquote(if_nomatch); result = (etag == match) ? -ERR_PRECONDITION_FAILED : 0; - ldpp_dout(dpp, 1) << fmt::format( - "If-None-Match: etag={} vs. ifmatch={}: {}", etag, - match, result - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "If-None-Match: etag={} vs. ifmatch={}: {}", etag, + match, result + ) + << dendl; } if (mod_ptr && !if_nomatch) { result = (mtime > *mod_ptr) ? 0 : -ERR_PRECONDITION_FAILED; - ldpp_dout(dpp, 10) - << fmt::format( - "If-Modified-Since: mtime={:%Y-%m-%d %H:%M:%S} vs. " - "if_time={:%Y-%m-%d %H:%M:%S}: {}", - fmt::gmtime(ceph::real_clock::to_time_t(mtime)), - fmt::gmtime(ceph::real_clock::to_time_t(*mod_ptr)), result - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "If-Modified-Since: mtime={:%Y-%m-%d %H:%M:%S} vs. " + "if_time={:%Y-%m-%d %H:%M:%S}: {}", + fmt::gmtime(ceph::real_clock::to_time_t(mtime)), + fmt::gmtime(ceph::real_clock::to_time_t(*mod_ptr)), + result + ) + << dendl; } if (unmod_ptr && !if_match) { result = (mtime < *unmod_ptr) ? 0 : -ERR_PRECONDITION_FAILED; - ldpp_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "If-UnModified-Since: mtime={:%Y-%m-%d %H:%M:%S} vs. 
" "if_time={:%Y-%m-%d %H:%M:%S}: {}", @@ -724,7 +720,7 @@ int SFSObject::handle_copy_object_conditionals( ) << dendl; } - ldpp_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "Conditional COPY_OBJ (Match/NoneMatch/Mod/UnMod) ({}, {}): {}", if_match != nullptr, if_nomatch != nullptr, mod_ptr != nullptr, diff --git a/src/rgw/driver/sfs/object.h b/src/rgw/driver/sfs/object.h index 3fec31632abfd..774115f731b51 100644 --- a/src/rgw/driver/sfs/object.h +++ b/src/rgw/driver/sfs/object.h @@ -72,7 +72,7 @@ class SFSObject : public StoreObject { optional_yield y ) override; - const std::string get_cls_name() { return "object_read"; } + const std::string get_cls_name() const { return "object_read"; } }; /** @@ -251,7 +251,7 @@ class SFSObject : public StoreObject { // version" call. void refresh_meta(bool update_version_id_from_metadata = false); - const std::string get_cls_name() { return "object"; } + const std::string get_cls_name() const { return "object"; } int handle_copy_object_conditionals( const DoutPrefixProvider* dpp, const ceph::real_time* mod_ptr, diff --git a/src/rgw/driver/sfs/sfs_bucket.cc b/src/rgw/driver/sfs/sfs_bucket.cc index 7df2aaa6e82f7..bf92a2afcb11f 100644 --- a/src/rgw/driver/sfs/sfs_bucket.cc +++ b/src/rgw/driver/sfs/sfs_bucket.cc @@ -11,6 +11,7 @@ * License version 2.1, as published by the Free Software * Foundation. See file COPYING. */ +#include "driver/sfs/sfs_log.h" #include "rgw_sal_sfs.h" #define dout_subsys ceph_subsys_rgw_sfs @@ -23,7 +24,7 @@ int SFStore::set_buckets_enabled( const DoutPrefixProvider* dpp, std::vector& /*buckets_to_enable*/, bool /*enabled*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -56,7 +57,7 @@ int SFStore::get_bucket( auto bucketref = it->second; auto bucket = make_unique(this, bucketref); - ldpp_dout(dpp, 10) << __func__ << ": bucket: " << bucket->get_name() << dendl; + lsfs_debug(dpp) << __func__ << ": bucket: " << bucket->get_name() << dendl; result->reset(bucket.release()); return 0; } @@ -66,7 +67,7 @@ int SFStore::get_bucket( const std::string& name, std::unique_ptr* bucket, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": get_bucket by name: " << name << dendl; + lsfs_debug(dpp) << __func__ << ": get_bucket by name: " << name << dendl; std::lock_guard l(buckets_map_lock); auto it = buckets.find(name); if (it == buckets.end()) { @@ -75,7 +76,7 @@ int SFStore::get_bucket( auto bucketref = it->second; auto b = make_unique(this, bucketref); - ldpp_dout(dpp, 10) << __func__ << ": bucket: " << b->get_name() << dendl; + lsfs_debug(dpp) << __func__ << ": bucket: " << b->get_name() << dendl; bucket->reset(b.release()); return 0; } diff --git a/src/rgw/driver/sfs/sfs_gc.cc b/src/rgw/driver/sfs/sfs_gc.cc index f21ce5fe358ef..5989ffc9375d6 100644 --- a/src/rgw/driver/sfs/sfs_gc.cc +++ b/src/rgw/driver/sfs/sfs_gc.cc @@ -21,6 +21,7 @@ #include "common/Clock.h" #include "driver/sfs/types.h" #include "multipart_types.h" +#include "rgw/driver/sfs/sfs_log.h" #include "rgw/driver/sfs/sqlite/sqlite_multipart.h" #include "rgw/driver/sfs/sqlite/sqlite_objects.h" #include "rgw/rgw_perf_counters.h" @@ -143,8 +144,8 @@ bool SFSGC::process_deleted_buckets() { // permanently delete removed buckets and their objects and versions sqlite::SQLiteBuckets db_buckets(store->db_conn); auto deleted_buckets = db_buckets.get_deleted_buckets_ids(); - lsfs_dout(this, 10) << "deleted buckets found = " << deleted_buckets.size() - << dendl; + lsfs_debug(this) << "deleted 
buckets found = " << deleted_buckets.size() + << dendl; bool time_to_delete_more = true; for (auto const& bucket_id : deleted_buckets) { time_to_delete_more = abort_bucket_multiparts(bucket_id); @@ -239,7 +240,7 @@ bool SFSGC::delete_pending_objects_data() { ); it = (*pending_objects_to_delete).erase(it); if (process_time_elapsed()) { - lsfs_dout(this, 10) << "Exit due to max process time reached." << dendl; + lsfs_debug(this) << "Exit due to max process time reached." << dendl; return false; // had no time to delete everything } } @@ -266,7 +267,7 @@ bool SFSGC::delete_pending_multiparts_data() { it = (*pending_multiparts_to_delete).erase(it); // check that we didn't exceed the max before keep going if (process_time_elapsed()) { - lsfs_dout(this, 10) << "Exit due to max process time reached." << dendl; + lsfs_debug(this) << "Exit due to max process time reached." << dendl; return false; // had no time to delete everything } } @@ -284,7 +285,7 @@ bool SFSGC::abort_bucket_multiparts(const std::string& bucket_id) { // check that we didn't exceed the max before keep going if (process_time_elapsed()) { - lsfs_dout(this, 10) << "Exit due to max process time reached." << dendl; + lsfs_debug(this) << "Exit due to max process time reached." << dendl; return false; // had no time to delete everything } return true; @@ -328,18 +329,16 @@ SFSGC::GCWorker::GCWorker( void* SFSGC::GCWorker::entry() { do { utime_t start = ceph_clock_now(); - lsfs_dout(dpp, 2) << "start" << dendl; + lsfs_startup(dpp) << "start" << dendl; if (!gc->suspended()) { common::PerfGuard elapsed(perfcounter, l_rgw_sfs_gc_processing_time); int r = gc->process(); if (r < 0) { - lsfs_dout( - dpp, 0 - ) << "ERROR: garbage collection process() returned error r=" - << r << dendl; + lsfs_err(dpp) << "ERROR: garbage collection process() returned error r=" + << r << dendl; } - lsfs_dout(dpp, 2) << "stop" << dendl; + lsfs_shutdown(dpp) << "stop" << dendl; } if (gc->going_down()) break; diff --git a/src/rgw/driver/sfs/sfs_lc.cc b/src/rgw/driver/sfs/sfs_lc.cc index fb25a36e5b4f9..d171c103ccc70 100644 --- a/src/rgw/driver/sfs/sfs_lc.cc +++ b/src/rgw/driver/sfs/sfs_lc.cc @@ -13,14 +13,10 @@ */ #include "sfs_lc.h" -#include - #include #include "sqlite/sqlite_lifecycle.h" -#define dout_subsys ceph_subsys_rgw_sfs - namespace rgw::sal::sfs { SFSLifecycle::SFSLifecycle(SFStore* _st) : store(_st) {} diff --git a/src/rgw/driver/sfs/sfs_log.h b/src/rgw/driver/sfs/sfs_log.h new file mode 100644 index 0000000000000..9cd5f738e5a42 --- /dev/null +++ b/src/rgw/driver/sfs/sfs_log.h @@ -0,0 +1,79 @@ +/* + * Ceph - scalable distributed file system + * SFS SAL implementation + * + * Copyright (C) 2022 SUSE LLC + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ +#pragma once + +// SFS dout, shows relevant info for SFS +// +#define lsfs_dout_for(_dpp, _lvl, _whom) \ + ldpp_dout(_dpp, _lvl) << "> " << _whom << "::" << __func__ << " " + +#define lsfs_dout(_dpp, _lvl) lsfs_dout_for(_dpp, _lvl, this->get_cls_name()) + +// In principle, this is what each log level means: +// +// ERROR: something unrecoverable happened. Must always go to log regardless of +// what log level the user has set. E.g., database corruption, ceph_abort(). +// +// IMPORTANT: something that we should log even if logging is at zero. Startup +// messages, or warnings, for instance. 
+//
+// INFO: things the user should still know about, but not important enough
+// to be logged at level zero.
+//
+// VERBOSE: something odd enough to be worth logging, or a recurring action
+// worth knowing about. E.g., a GC run.
+//
+// DEBUG: the vast majority of noise. Important when dealing with weirdness,
+// but should otherwise be hidden from the user.
+//
+// TRACE: logging that may significantly affect performance; to be used only
+// as a last resort.
+//
+// MEGA_TRACE: TRACE on steroids.
+//
+#define SFS_LOG_ERROR -1
+#define SFS_LOG_IMPORTANT 0
+#define SFS_LOG_INFO 1
+#define SFS_LOG_VERBOSE 10
+#define SFS_LOG_DEBUG 15
+#define SFS_LOG_TRACE 20
+#define SFS_LOG_MEGA_TRACE 30
+
+#define SFS_LOG_STARTUP SFS_LOG_IMPORTANT
+#define SFS_LOG_SHUTDOWN SFS_LOG_IMPORTANT
+#define SFS_LOG_WARN SFS_LOG_IMPORTANT
+
+#define lsfs_err(_dpp) lsfs_dout(_dpp, SFS_LOG_ERROR)
+#define lsfs_err_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_ERROR, _whom)
+
+#define lsfs_startup(_dpp) lsfs_dout(_dpp, SFS_LOG_STARTUP)
+#define lsfs_startup_for(_dpp, _whom) \
+  lsfs_dout_for(_dpp, SFS_LOG_STARTUP, _whom)
+
+#define lsfs_shutdown(_dpp) lsfs_dout(_dpp, SFS_LOG_SHUTDOWN)
+#define lsfs_shutdown_for(_dpp, _whom) \
+  lsfs_dout_for(_dpp, SFS_LOG_SHUTDOWN, _whom)
+
+#define lsfs_warn(_dpp) lsfs_dout(_dpp, SFS_LOG_WARN)
+#define lsfs_warn_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_WARN, _whom)
+
+#define lsfs_info(_dpp) lsfs_dout(_dpp, SFS_LOG_INFO)
+#define lsfs_info_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_INFO, _whom)
+
+#define lsfs_verb(_dpp) lsfs_dout(_dpp, SFS_LOG_VERBOSE)
+#define lsfs_verb_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_VERBOSE, _whom)
+
+#define lsfs_debug(_dpp) lsfs_dout(_dpp, SFS_LOG_DEBUG)
+#define lsfs_debug_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_DEBUG, _whom)
+
+#define lsfs_trace(_dpp) lsfs_dout(_dpp, SFS_LOG_TRACE)
+#define lsfs_trace_for(_dpp, _whom) lsfs_dout_for(_dpp, SFS_LOG_TRACE, _whom)
diff --git a/src/rgw/driver/sfs/sfs_user.cc b/src/rgw/driver/sfs/sfs_user.cc
index 2214b737b9bcc..3d897ecd6831b 100644
--- a/src/rgw/driver/sfs/sfs_user.cc
+++ b/src/rgw/driver/sfs/sfs_user.cc
@@ -11,6 +11,7 @@
  * License version 2.1, as published by the Free Software
  * Foundation. See file COPYING.
*/ +#include "rgw/driver/sfs/sfs_log.h" #include "rgw/driver/sfs/sqlite/sqlite_users.h" #include "rgw_sal_sfs.h" @@ -33,7 +34,7 @@ int SFStore::get_user_by_access_key( if (db_user) { user->reset(new SFSUser(db_user->uinfo, this)); } else { - ldpp_dout(dpp, 10) << __func__ << ": User not found" << dendl; + lsfs_debug(dpp) << __func__ << ": User not found" << dendl; err = -ENOENT; } return err; @@ -49,7 +50,7 @@ int SFStore::get_user_by_email( if (db_user) { user->reset(new SFSUser(db_user->uinfo, this)); } else { - ldpp_dout(dpp, 10) << __func__ << ": User not found" << dendl; + lsfs_debug(dpp) << __func__ << ": User not found" << dendl; err = -ENOENT; } return err; @@ -59,7 +60,7 @@ int SFStore::get_user_by_swift( const DoutPrefixProvider* dpp, const std::string& /*user_str*/, optional_yield /*y*/, std::unique_ptr* /*user*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } diff --git a/src/rgw/driver/sfs/sqlite/dbconn.cc b/src/rgw/driver/sfs/sqlite/dbconn.cc index 6699f8aef8abf..6a78375bcc1e4 100644 --- a/src/rgw/driver/sfs/sqlite/dbconn.cc +++ b/src/rgw/driver/sfs/sqlite/dbconn.cc @@ -19,6 +19,7 @@ #include #include "common/dout.h" +#include "rgw/driver/sfs/sfs_log.h" #define dout_subsys ceph_subsys_rgw_sfs @@ -61,11 +62,12 @@ static int sqlite_wal_hook_callback( int rc = sqlite3_wal_checkpoint_v2( db, zDb, mode, &total_frames, &checkpointed_frames ); - ldout(cct, 10) << "[SQLITE] WAL checkpoint (" - << (mode == SQLITE_CHECKPOINT_PASSIVE ? "passive" : "truncate") - << ") returned " << rc << " (" << sqlite3_errstr(rc) - << "), total_frames=" << total_frames - << ", checkpointed_frames=" << checkpointed_frames << dendl; + ldout(cct, SFS_LOG_DEBUG) + << "[SQLITE] WAL checkpoint (" + << (mode == SQLITE_CHECKPOINT_PASSIVE ? "passive" : "truncate") + << ") returned " << rc << " (" << sqlite3_errstr(rc) + << "), total_frames=" << total_frames + << ", checkpointed_frames=" << checkpointed_frames << dendl; return SQLITE_OK; } @@ -90,17 +92,19 @@ static int sqlite_profile_callback( const char* sql_str = sql ? 
sql.get() : sqlite3_sql(statement); if (runtime_ms > slowlog_time.count()) { - lsubdout(cct, rgw, 1) << fmt::format( - "[SQLITE SLOW QUERY] {} {:L}ms {}", - fmt::ptr(db), runtime_ms, sql_str - ) - << dendl; + lsubdout(cct, rgw_sfs, SFS_LOG_INFO) + << fmt::format( + "[SQLITE SLOW QUERY] {} {:L}ms {}", fmt::ptr(db), runtime_ms, + sql_str + ) + << dendl; } - lsubdout(cct, rgw, 20) << fmt::format( - "[SQLITE PROFILE] {} {:L}ms {}", fmt::ptr(db), - runtime_ms, sql_str - ) - << dendl; + lsubdout(cct, rgw_sfs, SFS_LOG_TRACE) + << fmt::format( + "[SQLITE PROFILE] {} {:L}ms {}", fmt::ptr(db), runtime_ms, + sql_str + ) + << dendl; perfcounter_prom_time_hist->hinc( l_rgw_prom_sfs_sqlite_profile, runtime_ns, 1 ); @@ -228,8 +232,9 @@ static int get_version( try { return storage.pragma.user_version(); } catch (const std::system_error& e) { - lsubdout(cct, rgw, -1) << "error opening db: " << e.code().message() << " (" - << e.code().value() << "), " << e.what() << dendl; + lsubdout(cct, rgw_sfs, SFS_LOG_ERROR) + << "error opening db: " << e.code().message() << " (" + << e.code().value() << "), " << e.what() << dendl; throw e; } } @@ -346,11 +351,11 @@ static void upgrade_metadata( auto err = fmt::format( "Error upgrading from version {}: {}", cur_version, errmsg ); - lsubdout(cct, rgw, -1) << err << dendl; + lsubdout(cct, rgw_sfs, SFS_LOG_ERROR) << err << dendl; throw sqlite_sync_exception(err); } - lsubdout(cct, rgw, 1) + lsubdout(cct, rgw_sfs, SFS_LOG_INFO) << fmt::format( "upgraded metadata from version {} to version {}", cur_version, cur_version + 1 @@ -362,7 +367,8 @@ static void upgrade_metadata( void DBConn::maybe_upgrade_metadata() { int db_version = get_version(cct, storage); - lsubdout(cct, rgw, 10) << "db user version: " << db_version << dendl; + lsubdout(cct, rgw_sfs, SFS_LOG_INFO) + << "db user version: " << db_version << dendl; if (db_version == 0) { // must have just been created, set version! 
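
Note on usage: the lsfs_* wrappers prefix each message with the emitting
class and method via this->get_cls_name(), which is also why the
get_cls_name() accessors in object.h above gain a const qualifier (the
macros must be callable from const member functions). A minimal sketch of a
converted call site follows; SFSThing and its message are hypothetical, and
only the macros and the get_cls_name() convention come from sfs_log.h:

    // Illustrative only -- SFSThing is not part of this patch.
    #include <string>

    #include "common/dout.h"
    #include "driver/sfs/sfs_log.h"

    #define dout_subsys ceph_subsys_rgw_sfs

    class SFSThing {
     public:
      const std::string get_cls_name() const { return "thing"; }

      void do_work(const DoutPrefixProvider* dpp) {
        // lsfs_debug(dpp) expands to
        //   ldpp_dout(dpp, SFS_LOG_DEBUG)
        //       << "> " << this->get_cls_name() << "::" << __func__ << " "
        // i.e. a level-15 message prefixed with "> thing::do_work ".
        lsfs_debug(dpp) << "doing work" << dendl;
      }
    };

Note the level shift this implies: call sites that previously logged at
level 10 now log at SFS_LOG_DEBUG (15), so they only become visible once the
rgw_sfs subsystem's debug level is raised to 15 or above.
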
diff --git a/src/rgw/driver/sfs/user.cc b/src/rgw/driver/sfs/user.cc index 79d4e50aaca2b..8c3d9b5352667 100644 --- a/src/rgw/driver/sfs/user.cc +++ b/src/rgw/driver/sfs/user.cc @@ -16,6 +16,7 @@ #include #include "driver/sfs/bucket.h" +#include "rgw/driver/sfs/sfs_log.h" #include "rgw/driver/sfs/sqlite/sqlite_users.h" #include "rgw_sal_sfs.h" @@ -32,7 +33,7 @@ int SFSUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y) { int SFSUser::merge_and_store_attrs( const DoutPrefixProvider* dpp, Attrs& /*new_attrs*/, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; /** Set the attributes in attrs, leaving any other existing attrs set, and * write them to the backing store; a merge operation */ return -ENOTSUP; @@ -44,7 +45,7 @@ int SFSUser::read_stats( ceph::real_time* /*last_stats_update*/ ) { /** Read the User stats from the backing Store, synchronous */ - ldpp_dout(dpp, 1) << __func__ << ": WARNING faked call" << dendl; + lsfs_warn(dpp) << __func__ << ": WARNING faked call" << dendl; return 0; } @@ -52,7 +53,7 @@ int SFSUser::read_stats_async( const DoutPrefixProvider* dpp, RGWGetUserStats_CB* /*cb*/ ) { /** Read the User stats from the backing Store, asynchronous */ - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -60,7 +61,7 @@ int SFSUser::complete_flush_stats( const DoutPrefixProvider* dpp, optional_yield /*y*/ ) { /** Flush accumulated stat changes for this User to the backing store */ - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -71,7 +72,7 @@ int SFSUser::read_usage( std::map& /*usage*/ ) { /** Read detailed usage stats for this User from the backing store */ - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -79,7 +80,7 @@ int SFSUser::trim_usage( const DoutPrefixProvider* dpp, uint64_t /*start_epoch*/, uint64_t /*end_epoch*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl; + lsfs_warn(dpp) << __func__ << ": TODO" << dendl; return -ENOTSUP; } @@ -107,8 +108,8 @@ int SFSUser::store_user( *old_info = db_user->uinfo; } if (db_user->user_version.ver != objv_tracker.read_version.ver) { - ldpp_dout(dpp, 0) << "User Read version mismatch err:(" << -ECANCELED - << ") " << dendl; + lsfs_warn(dpp) << "User Read version mismatch err:(" << -ECANCELED << ") " + << dendl; return -ECANCELED; } } @@ -137,8 +138,8 @@ int SFSUser::list_buckets( const std::string& end_marker, uint64_t max, bool /*need_stats*/, BucketList& buckets, optional_yield /*y*/ ) { - ldpp_dout(dpp, 10) << __func__ << ": marker (" << marker << ", " << end_marker - << "), max=" << max << dendl; + lsfs_debug(dpp) << __func__ << ": marker (" << marker << ", " << end_marker + << "), max=" << max << dendl; std::list lst = store->bucket_list(); for (const auto& bucketref : lst) { @@ -147,8 +148,7 @@ int SFSUser::list_buckets( } } - ldpp_dout(dpp, 10) << __func__ << ": buckets=" << buckets.get_buckets() - << dendl; + lsfs_debug(dpp) << __func__ << ": buckets=" << buckets.get_buckets() << dendl; return 0; } @@ -187,7 +187,7 @@ int SFSUser::create_bucket( pquota_info, new_attrs, new_info ); if (!bucketref) { - lsfs_dout(dpp, 0) << "error creating bucket '" << b << "'" << dendl; + lsfs_warn(dpp) << "error creating bucket '" << b << "'" << dendl; return -EINVAL; } diff --git a/src/rgw/driver/sfs/writer.cc 
b/src/rgw/driver/sfs/writer.cc index d9d153b9de6fa..a0e23820cac75 100644 --- a/src/rgw/driver/sfs/writer.cc +++ b/src/rgw/driver/sfs/writer.cc @@ -27,6 +27,7 @@ #include "driver/sfs/writer.h" #include "rgw/driver/sfs/fmt.h" #include "rgw/driver/sfs/multipart_types.h" +#include "rgw/driver/sfs/sfs_log.h" #include "rgw/driver/sfs/sqlite/sqlite_multipart.h" #include "rgw_common.h" #include "rgw_obj_manifest.h" @@ -47,7 +48,7 @@ static int close_fd_for( ret = ::fsync(fd); if (ret < 0) { - lsfs_dout_for(dpp, -1, whom) + lsfs_err_for(dpp, whom) << fmt::format( "failed to fsync fd:{}: {}. continuing.", fd, cpp_strerror(errno) ) @@ -57,7 +58,7 @@ static int close_fd_for( ret = ::close(fd); fd = -1; if (ret < 0) { - lsfs_dout_for(dpp, -1, whom) + lsfs_err_for(dpp, whom) << fmt::format( "failed closing fd:{}: {}. continuing.", fd, cpp_strerror(errno) ) @@ -96,11 +97,11 @@ SFSAtomicWriter::SFSAtomicWriter( bytes_written(0), io_failed(false), fd(-1) { - lsfs_dout(dpp, 10) << fmt::format( - "head_obj: {}, bucket: {}", _head_obj->get_key(), - _head_obj->get_bucket()->get_name() - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "head_obj: {}, bucket: {}", _head_obj->get_key(), + _head_obj->get_bucket()->get_name() + ) + << dendl; } SFSAtomicWriter::~SFSAtomicWriter() { @@ -110,7 +111,7 @@ SFSAtomicWriter::~SFSAtomicWriter() { char linkname[PATH_MAX] = "?"; const int ret = ::readlink(proc_fd_path.c_str(), linkname, PATH_MAX - 1); if (ret < 0) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "BUG: fd:{} still open. readlink filename:{} failed with {}", fd, proc_fd_path.string(), cpp_strerror(errno) @@ -119,13 +120,12 @@ SFSAtomicWriter::~SFSAtomicWriter() { } else { linkname[ret + 1] = '\0'; } - lsfs_dout(dpp, -1) - << fmt::format( - "BUG: fd:{} still open. fd resolves to filename:{}. " - "(io_failed:{} object_path:{}). closing fd.", - fd, linkname, io_failed, object_path.string() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "BUG: fd:{} still open. fd resolves to filename:{}. " + "(io_failed:{} object_path:{}). closing fd.", + fd, linkname, io_failed, object_path.string() + ) + << dendl; close(); } } @@ -134,8 +134,8 @@ int SFSAtomicWriter::open() noexcept { std::error_code ec; std::filesystem::create_directories(object_path.parent_path(), ec); if (ec) { - lsfs_dout(dpp, -1) << "failed to mkdir object path " << object_path << ": " - << ec << dendl; + lsfs_err(dpp) << "failed to mkdir object path " << object_path << ": " << ec + << dendl; switch (ec.value()) { case ENOSPC: return -ERR_QUOTA_EXCEEDED; @@ -149,8 +149,8 @@ int SFSAtomicWriter::open() noexcept { object_path.c_str(), O_CREAT | O_TRUNC | O_CLOEXEC | O_WRONLY, 0644 ); if (ret < 0) { - lsfs_dout(dpp, -1) << "error opening file " << object_path << ": " - << cpp_strerror(errno) << dendl; + lsfs_err(dpp) << "error opening file " << object_path << ": " + << cpp_strerror(errno) << dendl; return -ERR_INTERNAL_ERROR; } @@ -163,27 +163,27 @@ int SFSAtomicWriter::close() noexcept { } void SFSAtomicWriter::cleanup() noexcept { - lsfs_dout(dpp, -1) << fmt::format( - "cleaning up failed upload to file {}. " - "returning error.", - object_path.string() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "cleaning up failed upload to file {}. " + "returning error.", + object_path.string() + ) + << dendl; std::error_code ec; std::filesystem::remove(object_path, ec); if (ec) { - lsfs_dout(dpp, -1) << fmt::format( - "failed deleting file {}: {} {}. 
ignoring.", - object_path.string(), ec.message(), ec.value() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed deleting file {}: {} {}. ignoring.", + object_path.string(), ec.message(), ec.value() + ) + << dendl; } const auto dir_fd = ::open(object_path.parent_path().c_str(), O_RDONLY); int ret = ::fsync(dir_fd); if (ret < 0) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "failed fsyncing dir {} fd:{} for obj file {}: {}. ignoring.", object_path.parent_path().string(), dir_fd, object_path.string(), @@ -195,7 +195,7 @@ void SFSAtomicWriter::cleanup() noexcept { try { objref->delete_object_version(store); } catch (const std::system_error& e) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "failed to remove failed upload version from database {}: {}", store->db_conn->get_storage().filename(), e.what() @@ -207,21 +207,21 @@ void SFSAtomicWriter::cleanup() noexcept { int SFSAtomicWriter::prepare(optional_yield /*y*/) { if (store->filesystem_stats_avail_bytes.load() < store->min_space_left_for_data_write_ops_bytes) { - lsfs_dout(dpp, 10) << fmt::format( - "filesystem stat reservation check hit. " - "avail_bytes:{} avail_pct:{} total_bytes:{}. " - "returning quota error.", - store->filesystem_stats_avail_bytes, - store->filesystem_stats_avail_percent, - store->filesystem_stats_total_bytes - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "filesystem stat reservation check hit. " + "avail_bytes:{} avail_pct:{} total_bytes:{}. " + "returning quota error.", + store->filesystem_stats_avail_bytes, + store->filesystem_stats_avail_percent, + store->filesystem_stats_total_bytes + ) + << dendl; return -ERR_QUOTA_EXCEEDED; } objref = bucketref->create_version(obj.get_key()); if (!objref) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "failed to create new object version in bucket {} db:{}. " "failing operation.", @@ -233,13 +233,13 @@ int SFSAtomicWriter::prepare(optional_yield /*y*/) { } object_path = store->get_data_path() / objref->get_storage_path(); - lsfs_dout(dpp, 10) << "creating file at " << object_path << dendl; + lsfs_debug(dpp) << "creating file at " << object_path << dendl; return open(); } int SFSAtomicWriter::process(bufferlist&& data, uint64_t offset) { - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "data len: {}, offset: {}, io_failed: {}, fd: {}, fn: {}", data.length(), offset, io_failed, fd, object_path.string() @@ -250,23 +250,23 @@ int SFSAtomicWriter::process(bufferlist&& data, uint64_t offset) { } if (data.length() == 0) { - lsfs_dout(dpp, 10) << "final piece, wrote " << bytes_written << " bytes" - << dendl; + lsfs_debug(dpp) << "final piece, wrote " << bytes_written << " bytes" + << dendl; return 0; } ceph_assert(fd >= 0); int write_ret = data.write_fd(fd, offset); if (write_ret < 0) { - lsfs_dout(dpp, -1) << fmt::format( - "failed to write size:{} offset:{} to fd:{}: {}. " - "marking writer failed. " - "failing future io. " - "will delete partial data on completion. " - "returning internal error.", - data.length(), offset, fd, cpp_strerror(errno) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to write size:{} offset:{} to fd:{}: {}. " + "marking writer failed. " + "failing future io. " + "will delete partial data on completion. 
" + "returning internal error.", + data.length(), offset, fd, cpp_strerror(errno) + ) + << dendl; io_failed = true; close(); cleanup(); @@ -289,7 +289,7 @@ int SFSAtomicWriter::complete( const std::string* /*user_data*/, rgw_zone_set*, bool* /*canceled*/, optional_yield ) { - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "accounted_size: {}, etag: {}, set_mtime: {}, attrs: {}, " "delete_at: {}, if_match: {}, if_nomatch: {}", @@ -304,7 +304,7 @@ int SFSAtomicWriter::complete( set_mtime = now; } if (bytes_written != accounted_size) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "data written != accounted size. {} vs. {}. failing operation. " "returning internal error.", @@ -366,12 +366,12 @@ int SFSAtomicWriter::complete( try { objref->metadata_finish(store, bucketref->get_info().versioning_enabled()); } catch (const std::system_error& e) { - lsfs_dout(dpp, -1) << fmt::format( - "failed to update db object {}: {}. " - "failing operation. ", - objref->name, e.what() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to update db object {}: {}. " + "failing operation. ", + objref->name, e.what() + ) + << dendl; io_failed = true; cleanup(); switch (e.code().value()) { @@ -398,15 +398,13 @@ int SFSMultipartWriterV2::close() noexcept { } int SFSMultipartWriterV2::prepare(optional_yield /* y */) { - lsfs_dout(dpp, 10) << fmt::format( - "upload_id: {}, part: {}", upload_id, part_num - ) - << dendl; + lsfs_debug(dpp) << fmt::format("upload_id: {}, part: {}", upload_id, part_num) + << dendl; // check if store has enough space. if (store->filesystem_stats_avail_bytes.load() < store->min_space_left_for_data_write_ops_bytes) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "filesystem stat reservation check hit. avail_bytes: {}, " "avail_pct: {}, total_bytes: {} -- return quota error.", @@ -425,7 +423,7 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { std::string error_str; auto entry = mpdb.create_or_reset_part(upload_id, part_num, &error_str); if (!entry.has_value()) { - lsfs_dout(dpp, -1) + lsfs_err(dpp) << fmt::format( "error adding/replacing part {} in db, upload_id: {}: {}", part_num, upload_id, error_str @@ -437,19 +435,17 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { // prepare upload's file paths. 
auto mp = mpdb.get_multipart(upload_id); if (!mp.has_value()) { - lsfs_dout(dpp, -1) << fmt::format( - "multipart upload {} not found!", upload_id - ) - << dendl; + lsfs_err(dpp) << fmt::format("multipart upload {} not found!", upload_id) + << dendl; return -ERR_NO_SUCH_UPLOAD; } if (mp->state != MultipartState::INPROGRESS) { - lsfs_dout(dpp, -1) << fmt::format( - "multipart upload {} not available -- raced with " - "abort or complete!", - upload_id - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "multipart upload {} not available -- raced with " + "abort or complete!", + upload_id + ) + << dendl; return -ERR_NO_SUCH_UPLOAD; } @@ -459,11 +455,11 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { std::error_code ec; std::filesystem::create_directories(path.parent_path(), ec); if (ec) { - lsfs_dout(dpp, -1) - << fmt::format( - "error creating multipart upload's part paths: {}", ec.message() - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "error creating multipart upload's part paths: {}", + ec.message() + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -472,8 +468,7 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { int ret = ::open(path.c_str(), O_CREAT | O_TRUNC | O_CLOEXEC | O_WRONLY, 0600); if (ret < 0) { - lsfs_dout( - dpp, -1 + lsfs_err(dpp ) << fmt::format("error opening file {}: {}", path, cpp_strerror(errno)) << dendl; return -ERR_INTERNAL_ERROR; @@ -483,8 +478,7 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { ret = ::fsync(fd); if (ret < 0) { - lsfs_dout( - dpp, -1 + lsfs_err(dpp ) << fmt::format("error sync'ing opened file: {}", cpp_strerror(errno)) << dendl; return -ERR_INTERNAL_ERROR; @@ -496,7 +490,7 @@ int SFSMultipartWriterV2::prepare(optional_yield /* y */) { int SFSMultipartWriterV2::process(bufferlist&& data, uint64_t offset) { auto len = data.length(); - lsfs_dout(dpp, 10) + lsfs_debug(dpp) << fmt::format( "upload_id: {}, part: {}, data(len: {}, offset: {}), written: {}", upload_id, part_num, len, offset, bytes_written @@ -506,36 +500,33 @@ int SFSMultipartWriterV2::process(bufferlist&& data, uint64_t offset) { sqlite::SQLiteMultipart mpdb(store->db_conn); auto mp = mpdb.get_multipart(upload_id); if (!mp.has_value()) { - lsfs_dout(dpp, -1) << fmt::format( - "multipart upload {} not found!", upload_id - ) - << dendl; + lsfs_err(dpp) << fmt::format("multipart upload {} not found!", upload_id) + << dendl; return -ERR_NO_SUCH_UPLOAD; } if (mp->state != MultipartState::INPROGRESS) { - lsfs_dout(dpp, -1) << fmt::format( - "multipart upload {} not available -- raced with " - "abort or complete!", - upload_id - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "multipart upload {} not available -- raced with " + "abort or complete!", + upload_id + ) + << dendl; return -ERR_NO_SUCH_UPLOAD; } if (len == 0) { - lsfs_dout(dpp, 10) << "nothing to write" << dendl; + lsfs_debug(dpp) << "nothing to write" << dendl; return 0; } ceph_assert(fd >= 0); int write_ret = data.write_fd(fd, offset); if (write_ret < 0) { - lsfs_dout(dpp, -1) - << fmt::format( - "failed to write size: {}, offset: {}, to fd: {}: {}", len, - offset, fd, cpp_strerror(write_ret) - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "failed to write size: {}, offset: {}, to fd: {}: {}", + len, offset, fd, cpp_strerror(write_ret) + ) + << dendl; switch (write_ret) { case -EDQUOT: case -ENOSPC: @@ -565,23 +556,23 @@ int SFSMultipartWriterV2::complete( // * zones_trace // * canceled - lsfs_dout(dpp, 10) << fmt::format( - "accounted_size: {}, etag: {}, set_mtime: {}, " - "delete_at: {}, 
if_match: {}, if_nomatch: {}", - accounted_size, etag, to_iso_8601(set_mtime), - to_iso_8601(delete_at), if_match ? if_match : "N/A", - if_nomatch ? if_nomatch : "N/A" - ) - << dendl; + lsfs_debug(dpp) << fmt::format( + "accounted_size: {}, etag: {}, set_mtime: {}, " + "delete_at: {}, if_match: {}, if_nomatch: {}", + accounted_size, etag, to_iso_8601(set_mtime), + to_iso_8601(delete_at), if_match ? if_match : "N/A", + if_nomatch ? if_nomatch : "N/A" + ) + << dendl; lsfs_dout(dpp, 10) << "attrs: " << attrs << dendl; if (bytes_written != accounted_size) { - lsfs_dout(dpp, -1) << fmt::format( - "bytes_written != accounted_size, expected {} " - "byte, found {} byte.", - bytes_written, accounted_size - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "bytes_written != accounted_size, expected {} " + "byte, found {} byte.", + bytes_written, accounted_size + ) + << dendl; return -ERR_INTERNAL_ERROR; } @@ -589,11 +580,11 @@ int SFSMultipartWriterV2::complete( sqlite::SQLiteMultipart mpdb(store->db_conn); auto res = mpdb.finish_part(upload_id, part_num, etag, bytes_written); if (!res) { - lsfs_dout(dpp, -1) << fmt::format( - "unable to finish upload_id {}, part_num {}", - upload_id, part_num - ) - << dendl; + lsfs_err(dpp) << fmt::format( + "unable to finish upload_id {}, part_num {}", + upload_id, part_num + ) + << dendl; return -ERR_INTERNAL_ERROR; } auto entry = mpdb.get_part(upload_id, part_num); diff --git a/src/rgw/rgw_sal_sfs.h b/src/rgw/rgw_sal_sfs.h index c02394f961d36..e19f98c37fcc6 100644 --- a/src/rgw/rgw_sal_sfs.h +++ b/src/rgw/rgw_sal_sfs.h @@ -38,11 +38,6 @@ #include "rgw_sal.h" #include "rgw_status_page.h" -#define lsfs_dout_for(_dpp, _lvl, _whom) \ - ldpp_dout(_dpp, _lvl) << "> " << _whom << "::" << __func__ << " " - -#define lsfs_dout(_dpp, _lvl) lsfs_dout_for(_dpp, _lvl, this->get_cls_name()) - namespace rgw::sal::sfs { class SFSGC; }
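
A final note on the *_for variants: free functions have no
this->get_cls_name(), so they pass the "whom" prefix explicitly, as
close_fd_for does in writer.cc above. A sketch of that pattern, with the
function name and messages being illustrative rather than part of this
patch:

    // Illustrative only -- mirrors close_fd_for's use of lsfs_err_for.
    #include <cerrno>
    #include <string>

    #include <unistd.h>

    #include "common/dout.h"
    #include "common/errno.h"
    #include "driver/sfs/sfs_log.h"

    #define dout_subsys ceph_subsys_rgw_sfs

    static int fsync_for(
        const DoutPrefixProvider* dpp, int fd, const std::string& whom
    ) {
      if (::fsync(fd) < 0) {
        // SFS_LOG_ERROR is -1, so this message is emitted regardless of
        // the subsystem's configured log level.
        lsfs_err_for(dpp, whom)
            << "failed to fsync fd:" << fd << ": " << cpp_strerror(errno)
            << dendl;
        return -EIO;
      }
      return 0;
    }

Assuming Ceph's usual debug_<subsys> option naming, the new messages can
then be surfaced at runtime by raising debug_rgw_sfs (e.g. to 15/20 for
lsfs_debug output, or 20/20 for lsfs_trace) without touching the general
rgw subsystem.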