This repository has been archived by the owner on Jan 3, 2024. It is now read-only.

rgw/sfs: standardize log levels #239

Merged 5 commits on Nov 6, 2023
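This PR gives the SFS driver its own `rgw_sfs` logging subsystem and replaces hard-coded numeric levels (`lsfs_dout(dpp, 10)`, `ldpp_dout(dpp, -1)`, ...) with named levels and per-level macros from the new `driver/sfs/sfs_log.h`. That header is not part of this diff view; a minimal sketch consistent with how the names are used in the hunks below might look like this (the numeric values and the `ldpp_dout` mapping are assumptions, not the actual header contents):

```cpp
// Hypothetical sketch of driver/sfs/sfs_log.h -- the header itself is not
// shown in this diff. Only the names (SFS_LOG_* and lsfs_*) appear in the
// hunks below; the level values here are illustrative guesses on Ceph's
// usual debug scale, where lower numbers are more severe.
#pragma once

#include "common/dout.h"  // ldpp_dout()/ldout() logging machinery

#define SFS_LOG_ERROR   -1  // always logged: operator must act
#define SFS_LOG_WARN     2  // unexpected but recoverable
#define SFS_LOG_INFO     5  // notable events in normal operation
#define SFS_LOG_VERBOSE 10  // detailed operational flow
#define SFS_LOG_DEBUG   15  // developer tracing

// Prefix-provider variants used throughout the SFS sources.
#define lsfs_err(dpp)   ldpp_dout(dpp, SFS_LOG_ERROR)
#define lsfs_warn(dpp)  ldpp_dout(dpp, SFS_LOG_WARN)
#define lsfs_info(dpp)  ldpp_dout(dpp, SFS_LOG_INFO)
#define lsfs_verb(dpp)  ldpp_dout(dpp, SFS_LOG_VERBOSE)
#define lsfs_debug(dpp) ldpp_dout(dpp, SFS_LOG_DEBUG)
```

Call sites then read `lsfs_debug(dpp) << ...` instead of `lsfs_dout(dpp, 10) << ...`, and several messages are re-leveled along the way (for example, the `": TODO"` stubs move to `lsfs_warn`).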
2 changes: 1 addition & 1 deletion .clang-format
@@ -24,7 +24,7 @@ AlignConsecutiveDeclarations:
AlignCompound: false
PadOperators: false
AlignConsecutiveMacros:
- Enabled: false
+ Enabled: true
AcrossEmptyLines: false
AcrossComments: false
AlignCompound: false
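Flipping `AlignConsecutiveMacros` to `true` makes clang-format line up the replacement text of consecutive `#define`s, presumably so that a level table like the `SFS_LOG_*` block sketched above formats as an aligned column.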
2 changes: 2 additions & 0 deletions doc/rados/troubleshooting/log-and-debug.rst
@@ -298,6 +298,8 @@ to their default level or to a level suitable for normal operations.
+--------------------------+-----------+--------------+
| ``rgw dbstore``          | 1         | 5            |
+--------------------------+-----------+--------------+
+ | ``rgw sfs``              | 1         | 5            |
+ +--------------------------+-----------+--------------+
| ``javaclient``           | 1         | 5            |
+--------------------------+-----------+--------------+
| ``asok``                 | 1         | 5            |
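As with the other rows in this table, the first numeric column is the subsystem's default log level and the second its default in-memory (gather) level.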
1 change: 1 addition & 0 deletions src/common/subsys.h
@@ -64,6 +64,7 @@ SUBSYS(rgw_datacache, 1, 5)
SUBSYS(rgw_access, 1, 5)
SUBSYS(rgw_dbstore, 1, 5)
SUBSYS(rgw_flight, 1, 5)
+ SUBSYS(rgw_sfs, 1, 5)
SUBSYS(javaclient, 1, 5)
SUBSYS(asok, 1, 5)
SUBSYS(throttle, 1, 1)
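The `SUBSYS(rgw_sfs, 1, 5)` entry registers the subsystem with default log level 1 and in-memory level 5. Per the usual Ceph convention this also creates a matching `debug_rgw_sfs` option, so SFS verbosity can now be tuned independently of the rest of RGW (e.g. `debug rgw sfs = 20` in ceph.conf).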
171 changes: 83 additions & 88 deletions src/rgw/driver/sfs/bucket.cc
@@ -30,14 +30,15 @@
#include "driver/sfs/multipart.h"
#include "driver/sfs/object.h"
#include "driver/sfs/object_state.h"
#include "driver/sfs/sfs_log.h"
#include "driver/sfs/sqlite/objects/object_definitions.h"
#include "driver/sfs/sqlite/sqlite_list.h"
#include "driver/sfs/sqlite/sqlite_versioned_objects.h"
#include "driver/sfs/types.h"
#include "rgw_common.h"
#include "rgw_sal_sfs.h"

- #define dout_subsys ceph_subsys_rgw
+ #define dout_subsys ceph_subsys_rgw_sfs

using namespace std;
using namespace sqlite_orm;
@@ -75,7 +76,7 @@ std::unique_ptr<Object> SFSBucket::_get_object(sfs::ObjectRef obj) {
}

std::unique_ptr<Object> SFSBucket::get_object(const rgw_obj_key& key) {
- ldout(store->ceph_context(), 10)
+ ldout(store->ceph_context(), SFS_LOG_DEBUG)
<< "bucket::" << __func__ << ": key : " << key << dendl;
try {
auto objref = bucket->get(key);
@@ -88,7 +89,7 @@ std::unique_ptr<Object> SFSBucket::get_object(const rgw_obj_key& key) {
objref->instance = key.instance;
return _get_object(objref);
} catch (const sfs::UnknownObjectException& _) {
- ldout(store->ceph_context(), 10)
+ ldout(store->ceph_context(), SFS_LOG_VERBOSE)
<< "unable to find key " << key << " in bucket " << bucket->get_name()
<< dendl;
// possibly a copy, return a placeholder
@@ -106,7 +107,7 @@ int SFSBucket::verify_list_params(
// allow unordered is a ceph extension intended to improve performance
// of list() by not sorting through results from all over the cluster
lsfs_dout(
- dpp, 10
+ dpp, SFS_LOG_VERBOSE
) << "unsupported allow unordered list requested. returning ordered result."
<< get_name() << dendl;

@@ -117,8 +118,8 @@
}
}
if (!params.end_marker.empty()) {
lsfs_dout(dpp, 2) << "unsupported end marker (SWIFT) requested "
<< get_name() << dendl;
lsfs_verb(dpp) << "unsupported end marker (SWIFT) requested " << get_name()
<< dendl;
return -ENOTSUP;
}
if (!params.ns.empty() && params.ns != RGW_OBJ_NS_MULTIPART) {
@@ -152,13 +153,12 @@ int SFSBucket::list(
const DoutPrefixProvider* dpp, ListParams& params, int max,
ListResults& results, optional_yield /* y */
) {
- lsfs_dout(dpp, 10)
- << fmt::format(
- "listing bucket {} {} {}: max:{} params:", get_name(),
- params.ns == RGW_OBJ_NS_MULTIPART ? "multipart" : "",
- params.list_versions ? "versions" : "objects", max
- )
- << params << dendl;
+ lsfs_debug(dpp) << fmt::format(
+ "listing bucket {} {} {}: max:{} params:", get_name(),
+ params.ns == RGW_OBJ_NS_MULTIPART ? "multipart" : "",
+ params.list_versions ? "versions" : "objects", max
+ )
+ << params << dendl;

const int list_params_ok = verify_list_params(dpp, params, max);
if (list_params_ok < 0) {
@@ -186,15 +186,15 @@ int SFSBucket::list(
e.meta.mtime = mp.mtime;
results.objs.emplace_back(e);
}
lsfs_dout(dpp, 10) << fmt::format(
"success (prefix:{}, start_after:{}, "
"max:{}). #objs_returned:{} "
"next:{} have_more:{}",
params.prefix, params.marker.name, max,
params.delim, results.objs.size(),
results.next_marker, results.is_truncated
)
<< dendl;
lsfs_debug(dpp) << fmt::format(
"success (prefix:{}, start_after:{}, "
"max:{}). #objs_returned:{} "
"next:{} have_more:{}",
params.prefix, params.marker.name, max, params.delim,
results.objs.size(), results.next_marker,
results.is_truncated
)
<< dendl;
return 0;
}

@@ -231,14 +231,13 @@ int SFSBucket::list(
}
}();
if (!listing_succeeded) {
lsfs_dout(dpp, 10) << fmt::format(
"list (prefix:{}, start_after:{}, "
"max:{}) failed.",
params.prefix, start_with, max,
results.objs.size(), results.next_marker,
results.is_truncated
)
<< dendl;
lsfs_info(dpp) << fmt::format(
"list (prefix:{}, start_after:{}, "
"max:{}) failed.",
params.prefix, start_with, max, results.objs.size(),
results.next_marker, results.is_truncated
)
<< dendl;
return -ERR_INTERNAL_ERROR;
}

Expand All @@ -262,14 +261,13 @@ int SFSBucket::list(
list.objects(get_bucket_id(), params.prefix, query, 1, objects_after);
results.is_truncated = objects_after.size() > 0;
}
lsfs_dout(dpp, 10) << fmt::format(
"common prefix rollup #objs:{} -> #objs:{}, "
"#prefix:{}, more:{}",
results.objs.size(), new_results.size(),
results.common_prefixes.size(),
results.is_truncated
)
<< dendl;
lsfs_debug(dpp) << fmt::format(
"common prefix rollup #objs:{} -> #objs:{}, "
"#prefix:{}, more:{}",
results.objs.size(), new_results.size(),
results.common_prefixes.size(), results.is_truncated
)
<< dendl;

results.objs = new_results;
}
@@ -296,7 +294,7 @@ int SFSBucket::list(
}
}

- lsfs_dout(dpp, 10)
+ lsfs_debug(dpp)
<< fmt::format(
"success (prefix:{}, start_after:{}, "
"max:{} delim:{}). #objs_returned:{} "
@@ -322,11 +320,11 @@ int SFSBucket::remove_bucket(

auto res = sfs::SFSMultipartUploadV2::abort_multiparts(dpp, store, this);
if (res < 0) {
lsfs_dout(dpp, -1) << fmt::format(
"unable to abort multiparts on bucket {}: {}",
get_name(), res
)
<< dendl;
lsfs_err(dpp) << fmt::format(
"unable to abort multiparts on bucket {}: {}",
get_name(), res
)
<< dendl;
if (res == -ERR_NO_SUCH_BUCKET) {
return -ENOENT;
}
@@ -337,8 +335,7 @@
sfs::sqlite::SQLiteBuckets db_buckets(store->db_conn);
auto db_bucket = db_buckets.get_bucket(get_bucket_id());
if (!db_bucket.has_value()) {
ldpp_dout(dpp, 1) << __func__ << ": Bucket metadata was not found.."
<< dendl;
lsfs_verb(dpp) << __func__ << ": Bucket metadata was not found." << dendl;
return -ENOENT;
}
db_bucket->deleted = true;
@@ -352,7 +349,7 @@ int SFSBucket::remove_bucket_bypass_gc(
optional_yield /*y*/, const DoutPrefixProvider* dpp
) {
/** Remove this bucket, bypassing garbage collection. May be removed */
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}

@@ -383,12 +380,12 @@ int SFSBucket::set_acl(
int SFSBucket::chown(
const DoutPrefixProvider* dpp, User& /*new_user*/, optional_yield /*y*/
) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}

bool SFSBucket::is_owner(User* /*user*/) {
- ldout(store->ceph_context(), 10) << __func__ << ": TODO" << dendl;
+ ldout(store->ceph_context(), SFS_LOG_WARN) << __func__ << ": TODO" << dendl;
return true;
}

@@ -398,7 +395,7 @@ int SFSBucket::
// check if there are still objects owned by the bucket
sfs::sqlite::SQLiteBuckets db_buckets(store->db_conn);
if (!db_buckets.bucket_empty(get_bucket_id())) {
ldpp_dout(dpp, -1) << __func__ << ": Bucket Not Empty.." << dendl;
lsfs_debug(dpp) << __func__ << ": Bucket Not Empty." << dendl;
return -ENOTEMPTY;
}
return 0;
@@ -456,7 +453,7 @@ std::unique_ptr<MultipartUpload> SFSBucket::get_multipart_upload(
const std::string& with_oid, std::optional<std::string> with_upload_id,
ACLOwner with_owner, ceph::real_time with_mtime
) {
- ldout(store->ceph_context(), 10)
+ ldout(store->ceph_context(), SFS_LOG_DEBUG)
<< "bucket::" << __func__ << ": oid: " << with_oid
<< ", upload id: " << with_upload_id << dendl;

@@ -470,7 +467,7 @@
try_resolve_mp_from_oid(
store->db_conn, with_oid, next_oid, next_upload_id
)) {
- ldout(store->ceph_context(), 20)
+ ldout(store->ceph_context(), SFS_LOG_DEBUG)
<< fmt::format(
"called without upload_id. resolved oid {} to MP oid:{} "
"upload:{}",
@@ -511,12 +508,11 @@ int SFSBucket::list_multiparts(
std::vector<std::unique_ptr<MultipartUpload>>& uploads,
std::map<std::string, bool>* common_prefixes, bool* is_truncated
) {
- lsfs_dout(dpp, 10)
- << fmt::format(
- "prefix: {}, marker: {}, delim: {}, max_uploads: {}", prefix,
- marker, delim, max_uploads
- )
- << dendl;
+ lsfs_debug(dpp) << fmt::format(
+ "prefix: {}, marker: {}, delim: {}, max_uploads: {}",
+ prefix, marker, delim, max_uploads
+ )
+ << dendl;

return sfs::SFSMultipartUploadV2::list_multiparts(
dpp, store, this, bucket, prefix, marker, delim, max_uploads, uploads,
@@ -527,8 +523,7 @@ int SFSBucket::abort_multiparts(
int SFSBucket::abort_multiparts(
const DoutPrefixProvider* dpp, CephContext* /*cct*/
) {
- lsfs_dout(
- dpp, 10
+ lsfs_debug(dpp
) << fmt::format("aborting multipart uploads on bucket {}", get_name())
<< dendl;
return sfs::SFSMultipartUploadV2::abort_multiparts(dpp, store, this);
@@ -537,7 +532,7 @@
int SFSBucket::try_refresh_info(
const DoutPrefixProvider* dpp, ceph::real_time* /*pmtime*/
) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}

@@ -547,33 +542,33 @@ int SFSBucket::read_usage(
RGWUsageIter& /*usage_iter*/,
std::map<rgw_user_bucket, rgw_usage_log_entry>& /*usage*/
) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}
int SFSBucket::trim_usage(
const DoutPrefixProvider* dpp, uint64_t /*start_epoch*/,
uint64_t /*end_epoch*/
) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}

int SFSBucket::rebuild_index(const DoutPrefixProvider* dpp) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}

int SFSBucket::check_quota(
const DoutPrefixProvider* dpp, RGWQuota& quota, uint64_t obj_size,
optional_yield /*y*/, bool /*check_size_only*/
) {
ldpp_dout(dpp, 10) << __func__
<< ": user(max size: " << quota.user_quota.max_size
<< ", max objs: " << quota.user_quota.max_objects
<< "), bucket(max size: " << quota.bucket_quota.max_size
<< ", max objs: " << quota.bucket_quota.max_objects
<< "), obj size: " << obj_size << dendl;
ldpp_dout(dpp, 10) << __func__ << ": not implemented, return okay." << dendl;
lsfs_debug(dpp) << __func__
<< ": user(max size: " << quota.user_quota.max_size
<< ", max objs: " << quota.user_quota.max_objects
<< "), bucket(max size: " << quota.bucket_quota.max_size
<< ", max objs: " << quota.bucket_quota.max_objects
<< "), obj size: " << obj_size << dendl;
lsfs_warn(dpp) << __func__ << ": not implemented, return okay." << dendl;
return 0;
}

@@ -601,36 +596,36 @@ int SFSBucket::sync_user_stats(
}

int SFSBucket::update_container_stats(const DoutPrefixProvider* dpp) {
lsfs_dout(dpp, 10) << fmt::format(
"update bucket {} (id {}) stats", get_name(),
get_bucket_id()
)
<< dendl;
lsfs_debug(dpp) << fmt::format(
"update bucket {} (id {}) stats", get_name(),
get_bucket_id()
)
<< dendl;
sfs::sqlite::SQLiteBuckets bucketdb(store->db_conn);
auto stats = bucketdb.get_stats(get_bucket_id());

if (!stats.has_value()) {
lsfs_dout(dpp, 10) << fmt::format(
"unable to obtain stats for bucket {} (id {}) -- "
"no such bucket!",
get_name(), get_bucket_id()
)
<< dendl;
lsfs_verb(dpp) << fmt::format(
"unable to obtain stats for bucket {} (id {}) -- "
"no such bucket!",
get_name(), get_bucket_id()
)
<< dendl;
return -ERR_NO_SUCH_BUCKET;
}

lsfs_dout(dpp, 10) << fmt::format(
"bucket {} stats: size: {}, obj_cnt: {}",
get_name(), stats->size, stats->obj_count
)
<< dendl;
lsfs_debug(dpp) << fmt::format(
"bucket {} stats: size: {}, obj_cnt: {}", get_name(),
stats->size, stats->obj_count
)
<< dendl;
ent.size = ent.size_rounded = stats->size;
ent.count = stats->obj_count;
return 0;
}

int SFSBucket::check_bucket_shards(const DoutPrefixProvider* dpp) {
ldpp_dout(dpp, 10) << __func__ << ": TODO" << dendl;
lsfs_warn(dpp) << __func__ << ": TODO" << dendl;
return -ENOTSUP;
}
int SFSBucket::put_info(