diff --git a/include/gc/gc.h b/include/gc/gc.h
index ad2f054c4..847700302 100644
--- a/include/gc/gc.h
+++ b/include/gc/gc.h
@@ -205,6 +205,10 @@ GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand;
 GC_API void GC_CALL GC_set_finalize_on_demand(int);
 GC_API int GC_CALL GC_get_finalize_on_demand(void);
 
+/* Returns the total number of finalizers that have been run so far */
+/* by the collector.                                                */
+GC_API size_t GC_CALL GC_finalized_total(void);
+
 /* Mark objects reachable from finalizable objects in a separate    */
 /* post-pass.  This makes it a bit safer to use                     */
 /* non-topologically-ordered finalization.  Default value is determined */
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index c82cf223a..e2c0f2f1e 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -380,11 +380,12 @@ typedef struct hblkhdr hdr;
 EXTERN_C_BEGIN
 
 #ifndef GC_NO_FINALIZATION
+  GC_INNER void GC_maybe_wake_finalizer_thread(void);
   /* If GC_finalize_on_demand is not set, invoke eligible        */
   /* finalizers.  Otherwise: call (*GC_finalizer_notifier)() if  */
   /* there are finalizers to be run, and we have not called this */
   /* procedure yet this collection cycle.                        */
-# define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
+# define GC_INVOKE_FINALIZERS() GC_maybe_wake_finalizer_thread()
   GC_INNER void GC_notify_or_invoke_finalizers(void);
 
   /* Perform all indicated finalization actions on unmarked      */
@@ -1368,7 +1369,8 @@ struct _GC_arrays {
         /* running.  Used to approximate size of memory explicitly */
         /* deallocated by finalizers.                              */
   word _finalizer_bytes_freed;
-
+        /* Number of finalizers that have been run so far.         */
+  size_t _finalizers_run;
   /* Pointer to the first (lowest address) bottom_index; assumes the */
   /* allocator lock is held.                                         */
   bottom_index *_all_bottom_indices;
@@ -1676,6 +1678,7 @@ GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays;
 #define GC_composite_in_use GC_arrays._composite_in_use
 #define GC_excl_table GC_arrays._excl_table
 #define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
+#define GC_finalizers_run GC_arrays._finalizers_run
 #define GC_heapsize GC_arrays._heapsize
 #define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
 #define GC_large_free_bytes GC_arrays._large_free_bytes
diff --git a/reclaim.c b/reclaim.c
index 821c7c435..b5e55d09e 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -25,6 +25,14 @@
                 /* originally on free lists which we had to drop.   */
 GC_INNER signed_word GC_bytes_found = 0;
 
+#ifdef GC_PTHREADS
+static pthread_mutex_t flzr_mtx = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t flzr_t_has_work = PTHREAD_COND_INITIALIZER;
+static int flzr_can_work = 0;
+#else
+#error "This fork of BDWGC only supports POSIX threads"
+#endif
+
 #if defined(PARALLEL_MARK)
   /* Number of threads currently building free lists without holding */
   /* the allocator lock.  It is not safe to collect if this is nonzero. */
@@ -53,6 +61,8 @@ STATIC unsigned GC_n_leaked = 0;
   STATIC void GC_reclaim_unconditionally_marked(void);
 #endif
 
+STATIC unsigned GC_finalizer_thread_exists = 0;
+
 GC_INLINE void GC_add_leaked(ptr_t leaked)
 {
   GC_ASSERT(I_HOLD_LOCK());
@@ -880,3 +890,40 @@ GC_API void GC_CALL GC_enumerate_reachable_objects_inner(
   ed.client_data = client_data;
   GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, &ed);
 }
+
+static void *init_finalize_thread(void *arg)
+{
+  while (1) {
+    pthread_mutex_lock(&flzr_mtx);
+    while (flzr_can_work == 0) {
+      pthread_cond_wait(&flzr_t_has_work, &flzr_mtx);
+    }
+    flzr_can_work = 0;
+    pthread_mutex_unlock(&flzr_mtx);
+    GC_finalizers_run += GC_invoke_finalizers();
+  }
+  return arg;
+}
+
+GC_INNER void GC_maybe_wake_finalizer_thread(void)
+{
+  if (!GC_finalizer_thread_exists) {
+    pthread_t t;
+    pthread_create(&t, NULL, init_finalize_thread, NULL /* arg */);
+    GC_finalizer_thread_exists = 1;
+    return;
+  }
+
+  if (GC_should_invoke_finalizers() == 0)
+    return;
+
+  pthread_mutex_lock(&flzr_mtx);
+  flzr_can_work = 1;
+  pthread_cond_signal(&flzr_t_has_work);
+  pthread_mutex_unlock(&flzr_mtx);
+}
+
+GC_API size_t GC_CALL GC_finalized_total(void)
+{
+  return GC_finalizers_run;
+}
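
For reference, below is a minimal client sketch of the GC_finalized_total() API introduced by this patch. The file name, object count, include path, polling loop, and the on_finalize callback are illustrative assumptions, not part of the patch; everything else uses the existing public BDWGC API (GC_INIT, GC_MALLOC, GC_register_finalizer, GC_gcollect). Because the patch runs finalizers on a background thread, the counter can trail the collection, so the sketch polls briefly before reading it.

/* finalized_total_demo.c -- illustrative only; assumes the patched
 * library is built with pthread support and linked via -lgc -lpthread.
 */
#include <stdio.h>
#include <unistd.h>

#define GC_THREADS
#include <gc/gc.h>

/* Finalizer callback; does nothing, just lets the collector count it. */
static void GC_CALLBACK on_finalize(void *obj, void *client_data)
{
  (void)obj;
  (void)client_data;
}

int main(void)
{
  size_t i;

  GC_INIT();

  for (i = 0; i < 1000; i++) {
    void *obj = GC_MALLOC(64);

    /* Last two arguments are NULL: we do not care about any */
    /* previously registered finalizer or its client data.   */
    GC_register_finalizer(obj, on_finalize, NULL, NULL, NULL);
  }

  /* The objects above are now unreachable; force a collection so */
  /* their finalizers become eligible to run.                     */
  GC_gcollect();

  /* Finalizers run on the background thread, so poll briefly; not */
  /* every object is guaranteed to be reclaimed by this point.     */
  for (i = 0; i < 20 && GC_finalized_total() < 1000; i++)
    usleep(50 * 1000);

  printf("finalizers run so far: %zu\n", GC_finalized_total());
  return 0;
}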