Use wrapper for logging.
We no longer use the `max_level_trace` and `release_max_level_off`
features of the `log` crate.  This way, mmtk-core, as a library, no
longer conflicts with users that need to set the log-level features of
the `log` crate themselves.

We no longer use the logging macros of the `log` crate directly.  All
logging operations now go through wrapper macros in the
`crate::util::log` module.  Logs of level DEBUG and TRACE are disabled
at compile time for release builds, and can be enabled using the
"hot_log" Cargo feature.  By doing this, we can disable logging
statements on hot paths in mmtk-core only, without affecting log
statements in other crates.  It also allows developers to enable these
logs in release builds in order to debug performance problems.
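
Below is a minimal sketch of how such a wrapper module could be written. It is an illustration only: the macro bodies, the `debug_assertions` condition, and the re-export style are assumptions, and the actual `crate::util::log` module added by this commit may differ.

// Sketch of a wrapper module like crate::util::log (illustrative only).

// ERROR, WARN and INFO always forward to the renamed `log` crate.
pub(crate) use the_log_crate::{error, info, warn};

// DEBUG and TRACE forward to the `log` crate only in debug builds or
// when the "hot_log" Cargo feature is enabled ...
#[cfg(any(debug_assertions, feature = "hot_log"))]
pub(crate) use the_log_crate::{debug, trace};

// ... otherwise they expand to nothing, so their arguments are never
// evaluated on hot paths.
#[cfg(not(any(debug_assertions, feature = "hot_log")))]
macro_rules! debug {
    ($($arg:tt)+) => {};
}

#[cfg(not(any(debug_assertions, feature = "hot_log")))]
macro_rules! trace {
    ($($arg:tt)+) => {};
}

#[cfg(not(any(debug_assertions, feature = "hot_log")))]
pub(crate) use debug;
#[cfg(not(any(debug_assertions, feature = "hot_log")))]
pub(crate) use trace;

Call sites then import the module with `use crate::util::log;` and write `log::trace!(...)` instead of `trace!(...)`, as the diffs below show.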
wks committed Nov 4, 2024
1 parent 618fde4 commit 223c9d7
Showing 60 changed files with 466 additions and 334 deletions.
9 changes: 8 additions & 1 deletion Cargo.toml
@@ -34,7 +34,6 @@ itertools = "0.12.0"
jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
lazy_static = "1.1"
libc = "0.2"
log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
memoffset = "0.9"
mimalloc-sys = { version = "0.1.6", optional = true }
# MMTk macros - we have to specify a version here in order to publish the crate, even though we use the dependency from a local path.
@@ -51,6 +50,9 @@ static_assertions = "1.1.0"
strum = "0.26.2"
strum_macros = "0.26.2"
sysinfo = "0.30.9"
# mmtk-core internally uses wrapper macros in the `mmtk::util::log` module.
# We rename the crate so that programmers don't unintentially use the `log` crate directly.
the_log_crate = { package = "log", version = "0.4"}

[dev-dependencies]
paste = "1.0.8"
@@ -171,6 +173,11 @@ count_live_bytes_in_gc = []
# capture the type names of work packets.
bpftrace_workaround = []

# Enable verbose logs in release build. Those are placed on hot paths, so merely checking whether
# logging is enabled may degrade performance. Currently such logs include `debug!` and `trace!`
# levels.
hot_log = []

# Do not modify the following line - ci-common.sh matches it
# -- Mutally exclusive features --
# Only one feature from each group can be provided. Otherwise build will fail.
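
Because mmtk-core no longer forces the `max_level_*` features of the `log` crate, a VM binding that embeds mmtk-core can now choose its own compile-time log-level cap, and can opt into mmtk-core's hot-path logs separately. A hypothetical binding's Cargo.toml could contain something like the following (the dependency specifications are illustrative, not part of this commit):

[dependencies]
# The binding caps log levels itself; this no longer conflicts with mmtk-core.
log = { version = "0.4", features = ["max_level_debug", "release_max_level_info"] }
# Optionally keep mmtk-core's hot-path DEBUG/TRACE logs in release builds.
mmtk = { git = "https://github.com/mmtk/mmtk-core.git", features = ["hot_log"] }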
3 changes: 2 additions & 1 deletion src/global_state.rs
@@ -1,3 +1,4 @@
use crate::util::log;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;
use std::time::Instant;
@@ -160,7 +161,7 @@ impl GlobalState {
/// Increase the allocation bytes and return the current allocation bytes after increasing
pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
trace!(
log::trace!(
"Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
old_allocation_bytes,
size,
2 changes: 0 additions & 2 deletions src/lib.rs
@@ -30,8 +30,6 @@
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate downcast_rs;
#[macro_use]
extern crate static_assertions;
26 changes: 14 additions & 12 deletions src/memory_manager.rs
@@ -20,6 +20,7 @@ use crate::scheduler::{GCWork, GCWorker};
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE};
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::log;
use crate::util::opaque_pointer::*;
use crate::util::{Address, ObjectReference};
use crate::vm::slot::MemorySlice;
@@ -52,8 +53,8 @@ use crate::vm::VMBinding;
/// * `builder`: The reference to a MMTk builder.
pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
match crate::util::logger::try_init() {
Ok(_) => debug!("MMTk initialized the logger."),
Err(_) => debug!(
Ok(_) => log::debug!("MMTk initialized the logger."),
Err(_) => log::debug!(
"MMTk failed to initialize the logger. Possibly a logger has been initialized by user."
),
}
@@ -69,19 +70,20 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
if split[0] == "Threads:" {
let threads = split[1].parse::<i32>().unwrap();
if threads != 1 {
warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
log::warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
}
}
}
}
let mmtk = builder.build();

info!(
log::info!(
"Initialized MMTk with {:?} ({:?})",
*mmtk.options.plan, *mmtk.options.gc_trigger
*mmtk.options.plan,
*mmtk.options.gc_trigger
);
#[cfg(feature = "extreme_assertions")]
warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
log::warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
Box::new(mmtk)
}

@@ -115,7 +117,7 @@ pub fn bind_mutator<VM: VMBinding>(

const LOG_ALLOCATOR_MAPPING: bool = false;
if LOG_ALLOCATOR_MAPPING {
info!("{:?}", mutator.config);
log::info!("{:?}", mutator.config);
}
mutator
}
@@ -477,7 +479,7 @@ pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
);

if VM::VMCollection::is_collection_enabled() && mmtk.gc_trigger.poll(false, None) {
debug!("Collection required");
log::debug!("Collection required");
assert!(mmtk.state.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
VM::VMCollection::block_for_gc(tls);
}
@@ -764,7 +766,7 @@ pub fn add_finalizer<VM: VMBinding>(
object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
) {
if *mmtk.options.no_finalizer {
warn!("add_finalizer() is called when no_finalizer = true");
log::warn!("add_finalizer() is called when no_finalizer = true");
}

mmtk.finalizable_processor.lock().unwrap().add(object);
@@ -823,7 +825,7 @@ pub fn get_finalized_object<VM: VMBinding>(
mmtk: &'static MMTK<VM>,
) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
if *mmtk.options.no_finalizer {
warn!("get_finalized_object() is called when no_finalizer = true");
log::warn!("get_finalized_object() is called when no_finalizer = true");
}

mmtk.finalizable_processor
@@ -843,7 +845,7 @@ pub fn get_all_finalizers<VM: VMBinding>(
mmtk: &'static MMTK<VM>,
) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
if *mmtk.options.no_finalizer {
warn!("get_all_finalizers() is called when no_finalizer = true");
log::warn!("get_all_finalizers() is called when no_finalizer = true");
}

mmtk.finalizable_processor
@@ -863,7 +865,7 @@ pub fn get_finalizers_for<VM: VMBinding>(
object: ObjectReference,
) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
if *mmtk.options.no_finalizer {
warn!("get_finalizers() is called when no_finalizer = true");
log::warn!("get_finalizers() is called when no_finalizer = true");
}

mmtk.finalizable_processor
6 changes: 4 additions & 2 deletions src/mmtk.rs
@@ -15,6 +15,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::VMLayout;
use crate::util::heap::layout::{self, Mmapper, VMMap};
use crate::util::heap::HeapMeta;
use crate::util::log;
use crate::util::opaque_pointer::*;
use crate::util::options::Options;
use crate::util::reference_processor::ReferenceProcessors;
@@ -25,6 +26,7 @@ use crate::util::slot_logger::SlotLogger;
use crate::util::statistics::stats::Stats;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;

use std::cell::UnsafeCell;
use std::default::Default;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -419,12 +421,12 @@ impl<VM: VMBinding> MMTK<VM> {
) -> bool {
use crate::vm::Collection;
if !self.get_plan().constraints().collects_garbage {
warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
log::warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
return false;
}

if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() {
info!("User triggering collection");
log::info!("User triggering collection");
if exhaustive {
if let Some(gen) = self.get_plan().generational() {
gen.force_full_heap_collection();
15 changes: 8 additions & 7 deletions src/plan/generational/global.rs
@@ -9,6 +9,7 @@ use crate::scheduler::*;
use crate::util::copy::CopySemantics;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::log;
use crate::util::statistics::counter::EventCounter;
use crate::util::Address;
use crate::util::ObjectReference;
@@ -101,7 +102,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
let cur_nursery = self.nursery.reserved_pages();
let max_nursery = self.common.base.gc_trigger.get_max_nursery_pages();
let nursery_full = cur_nursery >= max_nursery;
trace!(
log::trace!(
"nursery_full = {:?} (nursery = {}, max_nursery = {})",
nursery_full,
cur_nursery,
@@ -143,7 +144,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
// The conditions are complex, and it is easier to read if we put them to separate if blocks.
#[allow(clippy::if_same_then_else, clippy::needless_bool)]
let is_full_heap = if crate::plan::generational::FULL_NURSERY_GC {
trace!("full heap: forced full heap");
log::trace!("full heap: forced full heap");
// For barrier overhead measurements, we always do full gc in nursery collections.
true
} else if self
@@ -154,7 +155,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
.load(Ordering::SeqCst)
&& *self.common.base.options.full_heap_system_gc
{
trace!("full heap: user triggered");
log::trace!("full heap: user triggered");
// User triggered collection, and we force full heap for user triggered collection
true
} else if self.next_gc_full_heap.load(Ordering::SeqCst)
@@ -166,7 +167,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
.load(Ordering::SeqCst)
> 1
{
trace!(
log::trace!(
"full heap: next_gc_full_heap = {}, cur_collection_attempts = {}",
self.next_gc_full_heap.load(Ordering::SeqCst),
self.common
@@ -178,7 +179,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
// Forces full heap collection
true
} else if Self::virtual_memory_exhausted(plan.generational().unwrap()) {
trace!("full heap: virtual memory exhausted");
log::trace!("full heap: virtual memory exhausted");
true
} else {
// We use an Appel-style nursery. The default GC (even for a "heap-full" collection)
@@ -191,7 +192,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {

self.gc_full_heap.store(is_full_heap, Ordering::SeqCst);

info!(
log::info!(
"{}",
if is_full_heap {
"Full heap GC"
@@ -265,7 +266,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
let available = plan.get_available_pages();
let min_nursery = plan.base().gc_trigger.get_min_nursery_pages();
let next_gc_full_heap = available < min_nursery;
trace!(
log::trace!(
"next gc will be full heap? {}, available pages = {}, min nursery = {}",
next_gc_full_heap,
available,
5 changes: 3 additions & 2 deletions src/plan/generational/immix/global.rs
@@ -20,6 +20,7 @@ use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::log;
use crate::util::Address;
use crate::util::ObjectReference;
use crate::util::VMWorkerThread;
@@ -109,10 +110,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
probe!(mmtk, gen_full_heap, is_full_heap);

if !is_full_heap {
info!("Nursery GC");
log::info!("Nursery GC");
scheduler.schedule_common_work::<GenImmixNurseryGCWorkContext<VM>>(self);
} else {
info!("Full heap GC");
log::info!("Full heap GC");
crate::plan::immix::Immix::schedule_immix_full_heap_collection::<
GenImmix<VM>,
GenImmixMatureGCWorkContext<VM, TRACE_KIND_FAST>,
25 changes: 13 additions & 12 deletions src/plan/global.rs
@@ -19,6 +19,7 @@ use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::HeapMeta;
use crate::util::heap::VMRequest;
use crate::util::log;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
@@ -233,7 +234,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes);
let total = used_pages + collection_reserve + vm_live_pages;

trace!(
log::trace!(
"Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}",
total,
used_pages,
@@ -267,7 +268,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
// may be larger than the reserved pages before a GC, as we may end up using more memory for thread local
// buffers for copy allocators).
let available_pages = total_pages.saturating_sub(reserved_pages);
trace!(
log::trace!(
"Total pages = {}, reserved pages = {}, available pages = {}",
total_pages,
reserved_pages,
@@ -491,25 +492,25 @@ impl<VM: VMBinding> BasePlan<VM> {
) -> ObjectReference {
#[cfg(feature = "code_space")]
if self.code_space.in_space(object) {
trace!("trace_object: object in code space");
log::trace!("trace_object: object in code space");
return self.code_space.trace_object::<Q>(queue, object);
}

#[cfg(feature = "code_space")]
if self.code_lo_space.in_space(object) {
trace!("trace_object: object in large code space");
log::trace!("trace_object: object in large code space");
return self.code_lo_space.trace_object::<Q>(queue, object);
}

#[cfg(feature = "ro_space")]
if self.ro_space.in_space(object) {
trace!("trace_object: object in ro_space space");
log::trace!("trace_object: object in ro_space space");
return self.ro_space.trace_object(queue, object);
}

#[cfg(feature = "vm_space")]
if self.vm_space.in_space(object) {
trace!("trace_object: object in boot space");
log::trace!("trace_object: object in boot space");
return self.vm_space.trace_object(queue, object);
}

@@ -545,18 +546,18 @@ impl<VM: VMBinding> BasePlan<VM> {
&self.options,
);
if stress_force_gc {
debug!(
log::debug!(
"Stress GC: allocation_bytes = {}, stress_factor = {}",
self.global_state.allocation_bytes.load(Ordering::Relaxed),
*self.options.stress_factor
);
debug!("Doing stress GC");
log::debug!("Doing stress GC");
self.global_state
.allocation_bytes
.store(0, Ordering::SeqCst);
}

debug!(
log::debug!(
"self.get_reserved_pages()={}, self.get_total_pages()={}",
plan.get_reserved_pages(),
plan.get_total_pages()
@@ -622,15 +623,15 @@ impl<VM: VMBinding> CommonPlan<VM> {
worker: &mut GCWorker<VM>,
) -> ObjectReference {
if self.immortal.in_space(object) {
trace!("trace_object: object in immortal space");
log::trace!("trace_object: object in immortal space");
return self.immortal.trace_object(queue, object);
}
if self.los.in_space(object) {
trace!("trace_object: object in los");
log::trace!("trace_object: object in los");
return self.los.trace_object(queue, object);
}
if self.nonmoving.in_space(object) {
trace!("trace_object: object in nonmoving space");
log::trace!("trace_object: object in nonmoving space");
return self.nonmoving.trace_object(queue, object);
}
self.base.trace_object::<Q>(queue, object, worker)
3 changes: 2 additions & 1 deletion src/plan/pageprotect/global.rs
@@ -10,6 +10,7 @@ use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::log;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::{plan::global::BasePlan, vm::VMBinding};
use crate::{
@@ -86,7 +87,7 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
impl<VM: VMBinding> PageProtect<VM> {
pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
// Warn users that the plan may fail due to maximum mapping allowed.
warn!(
log::warn!(
"PageProtect uses a high volume of memory mappings. \
If you encounter failures in memory protect/unprotect in this plan,\
consider increase the maximum mapping allowed by the OS{}.",
(Diffs of the remaining changed files are not shown.)
