/*
* Mark-and-sweep garbage collection.
*/
#include "duk_internal.h"
DUK_LOCAL_DECL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h);
DUK_LOCAL_DECL void duk__mark_tval(duk_heap *heap, duk_tval *tv);
/*
* Misc
*/
/* Select a thread for mark-and-sweep use.
*
* XXX: This needs to change later.
*/
DUK_LOCAL duk_hthread *duk__get_temp_hthread(duk_heap *heap) {
if (heap->curr_thread) {
return heap->curr_thread;
}
return heap->heap_thread; /* may be NULL, too */
}
/*
* Marking functions for heap types: mark children recursively
*/
DUK_LOCAL void duk__mark_hstring(duk_heap *heap, duk_hstring *h) {
DUK_UNREF(heap);
DUK_UNREF(h);
DUK_DDD(DUK_DDDPRINT("duk__mark_hstring: %p", (void *) h));
DUK_ASSERT(h);
/* nothing to process */
}
DUK_LOCAL void duk__mark_hobject(duk_heap *heap, duk_hobject *h) {
duk_uint_fast32_t i;
DUK_DDD(DUK_DDDPRINT("duk__mark_hobject: %p", (void *) h));
DUK_ASSERT(h);
/* XXX: use advancing pointers instead of index macros -> faster and smaller? */
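/* Entry part: mark each key, and either the getter/setter pair
* (accessor slot) or the plain value.
*/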
for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ENEXT(h); i++) {
duk_hstring *key = DUK_HOBJECT_E_GET_KEY(heap, h, i);
if (!key) {
continue;
}
duk__mark_heaphdr(heap, (duk_heaphdr *) key);
if (DUK_HOBJECT_E_SLOT_IS_ACCESSOR(heap, h, i)) {
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.get);
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.set);
} else {
duk__mark_tval(heap, &DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->v);
}
}
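/* Array part: mark each array item value. */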
for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ASIZE(h); i++) {
duk__mark_tval(heap, DUK_HOBJECT_A_GET_VALUE_PTR(heap, h, i));
}
/* hash part is a 'weak reference' and does not contribute */
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_GET_PROTOTYPE(heap, h));
/* Fast path for objects which don't have a subclass struct, or have a
* subclass struct but nothing that needs marking in the subclass struct.
*/
if (DUK_HOBJECT_HAS_FASTREFS(h)) {
DUK_ASSERT(DUK_HOBJECT_ALLOWS_FASTREFS(h));
return;
}
DUK_ASSERT(DUK_HOBJECT_PROHIBITS_FASTREFS(h));
if (DUK_HOBJECT_IS_COMPFUNC(h)) {
duk_hcompfunc *f = (duk_hcompfunc *) h;
duk_tval *tv, *tv_end;
duk_hobject **fn, **fn_end;
/* 'data' is reachable through every compiled function which
* contains a reference.
*/
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_DATA(heap, f));
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_LEXENV(heap, f));
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_VARENV(heap, f));
if (DUK_HCOMPFUNC_GET_DATA(heap, f) != NULL) {
tv = DUK_HCOMPFUNC_GET_CONSTS_BASE(heap, f);
tv_end = DUK_HCOMPFUNC_GET_CONSTS_END(heap, f);
while (tv < tv_end) {
duk__mark_tval(heap, tv);
tv++;
}
fn = DUK_HCOMPFUNC_GET_FUNCS_BASE(heap, f);
fn_end = DUK_HCOMPFUNC_GET_FUNCS_END(heap, f);
while (fn < fn_end) {
duk__mark_heaphdr(heap, (duk_heaphdr *) *fn);
fn++;
}
} else {
/* May happen in some out-of-memory corner cases. */
DUK_D(DUK_DPRINT("duk_hcompfunc 'data' is NULL, skipping marking"));
}
#if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
} else if (DUK_HOBJECT_IS_BUFOBJ(h)) {
duk_hbufobj *b = (duk_hbufobj *) h;
duk__mark_heaphdr(heap, (duk_heaphdr *) b->buf);
duk__mark_heaphdr(heap, (duk_heaphdr *) b->buf_prop);
#endif /* DUK_USE_BUFFEROBJECT_SUPPORT */
} else if (DUK_HOBJECT_IS_DECENV(h)) {
duk_hdecenv *e = (duk_hdecenv *) h;
DUK_ASSERT_HDECENV_VALID(e);
duk__mark_heaphdr(heap, (duk_heaphdr *) e->thread);
duk__mark_heaphdr(heap, (duk_heaphdr *) e->varmap);
} else if (DUK_HOBJECT_IS_OBJENV(h)) {
duk_hobjenv *e = (duk_hobjenv *) h;
DUK_ASSERT_HOBJENV_VALID(e);
duk__mark_heaphdr(heap, (duk_heaphdr *) e->target);
} else if (DUK_HOBJECT_IS_THREAD(h)) {
duk_hthread *t = (duk_hthread *) h;
duk_tval *tv;
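/* Mark all tagged values on the thread's value stack (up to valstack_top). */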
tv = t->valstack;
while (tv < t->valstack_top) {
duk__mark_tval(heap, tv);
tv++;
}
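/* Mark each activation: the executing function, its variable and
* lexical environments, and (when the nonstandard 'caller' property
* is enabled) the previous caller.
*/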
for (i = 0; i < (duk_uint_fast32_t) t->callstack_top; i++) {
duk_activation *act = t->callstack + i;
duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_ACT_GET_FUNC(act));
duk__mark_heaphdr(heap, (duk_heaphdr *) act->var_env);
duk__mark_heaphdr(heap, (duk_heaphdr *) act->lex_env);
#if defined(DUK_USE_NONSTD_FUNC_CALLER_PROPERTY)
duk__mark_heaphdr(heap, (duk_heaphdr *) act->prev_caller);
#endif
}
#if 0 /* nothing now */
for (i = 0; i < (duk_uint_fast32_t) t->catchstack_top; i++) {
duk_catcher *cat = t->catchstack + i;
}
#endif
duk__mark_heaphdr(heap, (duk_heaphdr *) t->resumer);
/* XXX: duk_small_uint_t would be enough for this loop */
for (i = 0; i < DUK_NUM_BUILTINS; i++) {
duk__mark_heaphdr(heap, (duk_heaphdr *) t->builtins[i]);
}
} else {
/* We may come here if the object should have a FASTREFS flag
* but it's missing for some reason. Assert that this never
* happens; even if it does, it's harmless aside from performance.
*/
DUK_D(DUK_DPRINT("missing FASTREFS flag for: %!iO", h));
DUK_ASSERT(0);
}
}
/* recursion tracking happens here only */
DUK_LOCAL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h) {
DUK_DDD(DUK_DDDPRINT("duk__mark_heaphdr %p, type %ld",
(void *) h,
(h != NULL ? (long) DUK_HEAPHDR_GET_TYPE(h) : (long) -1)));
if (!h) {
return;
}
#if defined(DUK_USE_ROM_OBJECTS)
if (DUK_HEAPHDR_HAS_READONLY(h)) {
DUK_DDD(DUK_DDDPRINT("readonly object %p, skip", (void *) h));
return;
}
#endif
#if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
h->h_assert_refcount++; /* Comparison refcount: bump even if already reachable. */
#endif
if (DUK_HEAPHDR_HAS_REACHABLE(h)) {
DUK_DDD(DUK_DDDPRINT("already marked reachable, skip"));
return;
}
DUK_HEAPHDR_SET_REACHABLE(h);
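/* If the C recursion limit is hit, only flag the node as a TEMPROOT;
* duk__mark_temproots_by_heap_scan() will later rescan the heap and
* continue marking from such nodes with a bounded C stack.
*/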
if (heap->mark_and_sweep_recursion_depth >= DUK_USE_MARK_AND_SWEEP_RECLIMIT) {
/* log this with a normal debug level because this should be relatively rare */
DUK_D(DUK_DPRINT("mark-and-sweep recursion limit reached, marking as temproot: %p", (void *) h));
DUK_HEAP_SET_MARKANDSWEEP_RECLIMIT_REACHED(heap);
DUK_HEAPHDR_SET_TEMPROOT(h);
return;
}
heap->mark_and_sweep_recursion_depth++;
switch (DUK_HEAPHDR_GET_TYPE(h)) {
case DUK_HTYPE_STRING:
duk__mark_hstring(heap, (duk_hstring *) h);
break;
case DUK_HTYPE_OBJECT:
duk__mark_hobject(heap, (duk_hobject *) h);
break;
case DUK_HTYPE_BUFFER:
/* nothing to mark */
break;
default:
DUK_D(DUK_DPRINT("attempt to mark heaphdr %p with invalid htype %ld", (void *) h, (long) DUK_HEAPHDR_GET_TYPE(h)));
DUK_UNREACHABLE();
}
heap->mark_and_sweep_recursion_depth--;
}
DUK_LOCAL void duk__mark_tval(duk_heap *heap, duk_tval *tv) {
DUK_DDD(DUK_DDDPRINT("duk__mark_tval %p", (void *) tv));
if (!tv) {
return;
}
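/* Only heap-allocated tagged values (string, object, buffer) carry
* a duk_heaphdr pointer that needs marking.
*/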
if (DUK_TVAL_IS_HEAP_ALLOCATED(tv)) {
duk__mark_heaphdr(heap, DUK_TVAL_GET_HEAPHDR(tv));
}
}
/*
* Mark the heap.
*/
DUK_LOCAL void duk__mark_roots_heap(duk_heap *heap) {
duk_small_uint_t i;
DUK_DD(DUK_DDPRINT("duk__mark_roots_heap: %p", (void *) heap));
duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_thread);
duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_object);
for (i = 0; i < DUK_HEAP_NUM_STRINGS; i++) {
duk_hstring *h = DUK_HEAP_GET_STRING(heap, i);
duk__mark_heaphdr(heap, (duk_heaphdr *) h);
}
duk__mark_tval(heap, &heap->lj.value1);
duk__mark_tval(heap, &heap->lj.value2);
#if defined(DUK_USE_DEBUGGER_SUPPORT)
for (i = 0; i < heap->dbg_breakpoint_count; i++) {
duk__mark_heaphdr(heap, (duk_heaphdr *) heap->dbg_breakpoints[i].filename);
}
#endif
}
/*
* Mark refzero_list objects.
*
* Objects on the refzero_list have no inbound references. They might have
* outbound references to objects that we might free, which would invalidate
* any references held by the refzero objects. A refzero object might also
* be rescued by refcount finalization. Refzero objects are treated as
* reachability roots to ensure they (or anything they point to) are not
* freed in mark-and-sweep.
*/
#if defined(DUK_USE_REFERENCE_COUNTING)
DUK_LOCAL void duk__mark_refzero_list(duk_heap *heap) {
duk_heaphdr *hdr;
DUK_DD(DUK_DDPRINT("duk__mark_refzero_list: %p", (void *) heap));
hdr = heap->refzero_list;
while (hdr) {
duk__mark_heaphdr(heap, hdr);
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
}
#endif
/*
* Mark unreachable, finalizable objects.
*
* Such objects will be moved aside and their finalizers run later. They have
* to be treated as reachability roots for their properties etc to remain
* allocated. This marking is only done for unreachable values which would
* be swept later (refzero_list is thus excluded).
*
* Objects are first marked FINALIZABLE and only then marked as reachability
* roots; otherwise circular references might be handled inconsistently.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
DUK_LOCAL void duk__mark_finalizable(duk_heap *heap) {
duk_hthread *thr;
duk_heaphdr *hdr;
duk_size_t count_finalizable = 0;
DUK_DD(DUK_DDPRINT("duk__mark_finalizable: %p", (void *) heap));
thr = duk__get_temp_hthread(heap);
DUK_ASSERT(thr != NULL);
hdr = heap->heap_allocated;
while (hdr) {
/* A finalizer is looked up from the object and up its prototype chain
* (which allows inherited finalizers). The finalizer is checked for
* using a duk_hobject flag which is kept in sync with the presence and
* callability of a _Finalizer hidden symbol.
*/
if (!DUK_HEAPHDR_HAS_REACHABLE(hdr) &&
DUK_HEAPHDR_GET_TYPE(hdr) == DUK_HTYPE_OBJECT &&
!DUK_HEAPHDR_HAS_FINALIZED(hdr) &&
duk_hobject_has_finalizer_fast(thr, (duk_hobject *) hdr)) {
/* heaphdr:
* - is not reachable
* - is an object
* - is not a finalized object
* - has a finalizer
*/
DUK_DD(DUK_DDPRINT("unreachable heap object will be "
"finalized -> mark as finalizable "
"and treat as a reachability root: %p",
(void *) hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(hdr));
DUK_HEAPHDR_SET_FINALIZABLE(hdr);
count_finalizable++;
}
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
if (count_finalizable == 0) {
return;
}
DUK_DD(DUK_DDPRINT("marked %ld heap objects as finalizable, now mark them reachable",
(long) count_finalizable));
hdr = heap->heap_allocated;
while (hdr) {
if (DUK_HEAPHDR_HAS_FINALIZABLE(hdr)) {
duk__mark_heaphdr(heap, hdr);
}
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
/* Caller will finish the marking process if we hit a recursion limit. */
}
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Mark objects on finalize_list.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
DUK_LOCAL void duk__mark_finalize_list(duk_heap *heap) {
duk_heaphdr *hdr;
#if defined(DUK_USE_DEBUG)
duk_size_t count_finalize_list = 0;
#endif
DUK_DD(DUK_DDPRINT("duk__mark_finalize_list: %p", (void *) heap));
hdr = heap->finalize_list;
while (hdr) {
duk__mark_heaphdr(heap, hdr);
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
#if defined(DUK_USE_DEBUG)
count_finalize_list++;
#endif
}
#if defined(DUK_USE_DEBUG)
if (count_finalize_list > 0) {
DUK_D(DUK_DPRINT("marked %ld objects on the finalize_list as reachable (previous finalizer run skipped)",
(long) count_finalize_list));
}
#endif
}
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Fallback marking handler if recursion limit is reached.
*
* Iterates 'temproots' until recursion limit is no longer hit. Note
that temproots may reside either in the heap allocated list or the
* refzero work list. This is a slow scan, but guarantees that we
* finish with a bounded C stack.
*
* Note that nodes may have been marked as temproots before this
scan began, OR they may have been marked during the scan (as
* we process nodes recursively also during the scan). This is
* intended behavior.
*/
#if defined(DUK_USE_DEBUG)
DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr, duk_size_t *count) {
#else
DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr) {
#endif
if (!DUK_HEAPHDR_HAS_TEMPROOT(hdr)) {
DUK_DDD(DUK_DDDPRINT("not a temp root: %p", (void *) hdr));
return;
}
DUK_DDD(DUK_DDDPRINT("found a temp root: %p", (void *) hdr));
DUK_HEAPHDR_CLEAR_TEMPROOT(hdr);
DUK_HEAPHDR_CLEAR_REACHABLE(hdr); /* done so that duk__mark_heaphdr() works correctly */
#if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
hdr->h_assert_refcount--; /* Same node visited twice. */
#endif
duk__mark_heaphdr(heap, hdr);
#if defined(DUK_USE_DEBUG)
(*count)++;
#endif
}
DUK_LOCAL void duk__mark_temproots_by_heap_scan(duk_heap *heap) {
duk_heaphdr *hdr;
#if defined(DUK_USE_DEBUG)
duk_size_t count;
#endif
DUK_DD(DUK_DDPRINT("duk__mark_temproots_by_heap_scan: %p", (void *) heap));
while (DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap)) {
DUK_DD(DUK_DDPRINT("recursion limit reached, doing heap scan to continue from temproots"));
#if defined(DUK_USE_DEBUG)
count = 0;
#endif
DUK_HEAP_CLEAR_MARKANDSWEEP_RECLIMIT_REACHED(heap);
hdr = heap->heap_allocated;
while (hdr) {
#if defined(DUK_USE_DEBUG)
duk__handle_temproot(heap, hdr, &count);
#else
duk__handle_temproot(heap, hdr);
#endif
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
/* must also check refzero_list */
#if defined(DUK_USE_REFERENCE_COUNTING)
hdr = heap->refzero_list;
while (hdr) {
#if defined(DUK_USE_DEBUG)
duk__handle_temproot(heap, hdr, &count);
#else
duk__handle_temproot(heap, hdr);
#endif
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
#endif /* DUK_USE_REFERENCE_COUNTING */
#if defined(DUK_USE_DEBUG)
DUK_DD(DUK_DDPRINT("temproot mark heap scan processed %ld temp roots", (long) count));
#endif
}
}
/*
* Finalize refcounts for heap elements just about to be freed.
* This must be done for all objects before freeing to avoid any
* stale pointer dereferences.
*
* Note that this must deduce the set of objects to be freed
* identically to duk__sweep_heap().
*/
#if defined(DUK_USE_REFERENCE_COUNTING)
DUK_LOCAL void duk__finalize_refcounts(duk_heap *heap) {
duk_hthread *thr;
duk_heaphdr *hdr;
thr = duk__get_temp_hthread(heap);
DUK_ASSERT(thr != NULL);
DUK_DD(DUK_DDPRINT("duk__finalize_refcounts: heap=%p, hthread=%p",
(void *) heap, (void *) thr));
hdr = heap->heap_allocated;
while (hdr) {
if (!DUK_HEAPHDR_HAS_REACHABLE(hdr)) {
/*
* Unreachable object about to be swept. Finalize target refcounts
* (objects which the unreachable object points to) without doing
* refzero processing. Recursive decrefs are also prevented when
* refzero processing is disabled.
*
* Value cannot be a finalizable object, as they have been made
* temporarily reachable for this round.
*/
DUK_DDD(DUK_DDDPRINT("unreachable object, refcount finalize before sweeping: %p", (void *) hdr));
duk_heaphdr_refcount_finalize(thr, hdr);
}
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
}
#endif /* DUK_USE_REFERENCE_COUNTING */
/*
* Clear (reachable) flags of refzero work list.
*/
#if defined(DUK_USE_REFERENCE_COUNTING)
DUK_LOCAL void duk__clear_refzero_list_flags(duk_heap *heap) {
duk_heaphdr *hdr;
DUK_DD(DUK_DDPRINT("duk__clear_refzero_list_flags: %p", (void *) heap));
hdr = heap->refzero_list;
while (hdr) {
DUK_HEAPHDR_CLEAR_REACHABLE(hdr);
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
/* DUK_HEAPHDR_HAS_FINALIZED may or may not be set. */
DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
}
#endif /* DUK_USE_REFERENCE_COUNTING */
/*
* Clear (reachable) flags of finalize_list
*
* We could mostly do this in the sweep phase when we move objects from the
* heap into the finalize_list. However, if a finalizer run is skipped
* during a mark-and-sweep, the objects on the finalize_list will be marked
* reachable during the next mark-and-sweep. Since they're already on the
* finalize_list, no-one will be clearing their REACHABLE flag so we do it
* here. (This now overlaps with the sweep handling in a harmless way.)
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
DUK_LOCAL void duk__clear_finalize_list_flags(duk_heap *heap) {
duk_heaphdr *hdr;
DUK_DD(DUK_DDPRINT("duk__clear_finalize_list_flags: %p", (void *) heap));
hdr = heap->finalize_list;
while (hdr) {
DUK_HEAPHDR_CLEAR_REACHABLE(hdr);
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
}
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Sweep stringtable
*/
DUK_LOCAL void duk__sweep_stringtable(duk_heap *heap, duk_size_t *out_count_keep) {
duk_hstring *h;
duk_hstring *prev;
duk_uint32_t i;
#if defined(DUK_USE_DEBUG)
duk_size_t count_free = 0;
#endif
duk_size_t count_keep = 0;
DUK_DD(DUK_DDPRINT("duk__sweep_stringtable: %p", (void *) heap));
#if defined(DUK_USE_STRTAB_PTRCOMP)
if (heap->strtable16 == NULL) {
#else
if (heap->strtable == NULL) {
#endif
goto done;
}
for (i = 0; i < heap->st_size; i++) {
#if defined(DUK_USE_STRTAB_PTRCOMP)
h = DUK_USE_HEAPPTR_DEC16(heap->heap_udata, heap->strtable16[i]);
#else
h = heap->strtable[i];
#endif
prev = NULL;
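/* Walk the hash chain of this slot; 'prev' tracks the last kept
* string so an unreachable string can be unlinked from the chain.
*/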
while (h != NULL) {
duk_hstring *next;
next = h->hdr.h_next;
if (DUK_HEAPHDR_HAS_REACHABLE((duk_heaphdr *) h)) {
DUK_HEAPHDR_CLEAR_REACHABLE((duk_heaphdr *) h);
count_keep++;
prev = h;
} else {
#if defined(DUK_USE_DEBUG)
count_free++;
#endif
#if defined(DUK_USE_REFERENCE_COUNTING)
/* Non-zero refcounts should not happen for unreachable strings,
* because we refcount finalize all unreachable objects which
* should have decreased unreachable string refcounts to zero
* (even for cycles).
*/
DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) == 0);
#endif
/* deal with weak references first */
duk_heap_strcache_string_remove(heap, (duk_hstring *) h);
/* remove the string from the string table */
duk_heap_strtable_unlink_prev(heap, (duk_hstring *) h, (duk_hstring *) prev);
/* free inner references (these exist e.g. when external
* strings are enabled) and the struct itself.
*/
duk_free_hstring(heap, (duk_hstring *) h);
/* don't update 'prev'; it should be the last string kept */
}
h = next;
}
}
done:
#if defined(DUK_USE_DEBUG)
DUK_D(DUK_DPRINT("mark-and-sweep sweep stringtable: %ld freed, %ld kept",
(long) count_free, (long) count_keep));
#endif
*out_count_keep = count_keep;
}
/*
* Sweep heap
*/
DUK_LOCAL void duk__sweep_heap(duk_heap *heap, duk_int_t flags, duk_size_t *out_count_keep) {
duk_heaphdr *prev; /* last element that was left in the heap */
duk_heaphdr *curr;
duk_heaphdr *next;
#if defined(DUK_USE_DEBUG)
duk_size_t count_free = 0;
duk_size_t count_finalize = 0;
duk_size_t count_rescue = 0;
#endif
duk_size_t count_keep = 0;
DUK_UNREF(flags);
DUK_DD(DUK_DDPRINT("duk__sweep_heap: %p", (void *) heap));
prev = NULL;
curr = heap->heap_allocated;
heap->heap_allocated = NULL;
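/* Rebuild heap_allocated in place: kept objects are relinked after
* 'prev', finalizable objects are moved to finalize_list, and
* unreachable objects are freed.
*/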
while (curr) {
/* Strings and ROM objects are never placed on the heap allocated list. */
DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_STRING);
DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(curr));
next = DUK_HEAPHDR_GET_NEXT(heap, curr);
if (DUK_LIKELY(DUK_HEAPHDR_HAS_REACHABLE(curr))) {
/*
* Reachable object, keep
*/
DUK_DDD(DUK_DDDPRINT("sweep, reachable: %p", (void *) curr));
if (DUK_UNLIKELY(DUK_HEAPHDR_HAS_FINALIZABLE(curr))) {
/*
* If object has been marked finalizable, move it to the
* "to be finalized" work list. It will be collected on
* the next mark-and-sweep if it is still unreachable
* after running the finalizer.
*/
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
DUK_DDD(DUK_DDDPRINT("object has finalizer, move to finalization work list: %p", (void *) curr));
#if defined(DUK_USE_DOUBLE_LINKED_HEAP)
if (heap->finalize_list != NULL) {
DUK_HEAPHDR_SET_PREV(heap, heap->finalize_list, curr);
}
DUK_HEAPHDR_SET_PREV(heap, curr, NULL);
#endif
DUK_HEAPHDR_SET_NEXT(heap, curr, heap->finalize_list);
DUK_ASSERT_HEAPHDR_LINKS(heap, curr);
heap->finalize_list = curr;
#if defined(DUK_USE_DEBUG)
count_finalize++;
#endif
} else {
/*
* Object will be kept; queue object back to heap_allocated (to tail)
*/
if (DUK_UNLIKELY(DUK_HEAPHDR_HAS_FINALIZED(curr))) {
/*
* Object's finalizer was executed on last round, and
* object has been happily rescued.
*/
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
DUK_DD(DUK_DDPRINT("object rescued during mark-and-sweep finalization: %p", (void *) curr));
#if defined(DUK_USE_DEBUG)
count_rescue++;
#endif
} else {
/*
* Plain, boring reachable object.
*/
DUK_DD(DUK_DDPRINT("keep object: %!iO", curr));
count_keep++;
}
if (prev != NULL) {
DUK_ASSERT(heap->heap_allocated != NULL);
DUK_HEAPHDR_SET_NEXT(heap, prev, curr);
} else {
DUK_ASSERT(heap->heap_allocated == NULL);
heap->heap_allocated = curr;
}
#if defined(DUK_USE_DOUBLE_LINKED_HEAP)
DUK_HEAPHDR_SET_PREV(heap, curr, prev);
#endif
DUK_ASSERT_HEAPHDR_LINKS(heap, prev);
DUK_ASSERT_HEAPHDR_LINKS(heap, curr);
prev = curr;
}
DUK_HEAPHDR_CLEAR_REACHABLE(curr);
DUK_HEAPHDR_CLEAR_FINALIZED(curr);
DUK_HEAPHDR_CLEAR_FINALIZABLE(curr);
DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(curr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
curr = next;
} else {
/*
* Unreachable object, free
*/
DUK_DDD(DUK_DDDPRINT("sweep, not reachable: %p", (void *) curr));
#if defined(DUK_USE_REFERENCE_COUNTING)
/* Non-zero refcounts should not happen because we refcount
* finalize all unreachable objects which should cancel out
* refcounts (even for cycles).
*/
DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(curr) == 0);
#endif
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
if (DUK_HEAPHDR_HAS_FINALIZED(curr)) {
DUK_DDD(DUK_DDDPRINT("finalized object not rescued: %p", (void *) curr));
}
/* Note: object cannot be a finalizable unreachable object, as
* they have been marked temporarily reachable for this round,
* and are handled above.
*/
#if defined(DUK_USE_DEBUG)
count_free++;
#endif
/* weak refs should be handled here, but no weak refs for
* any non-string objects exist right now.
*/
/* free object and all auxiliary (non-heap) allocs */
duk_heap_free_heaphdr_raw(heap, curr);
curr = next;
}
}
if (prev != NULL) {
DUK_HEAPHDR_SET_NEXT(heap, prev, NULL);
}
DUK_ASSERT_HEAPHDR_LINKS(heap, prev);
#if defined(DUK_USE_DEBUG)
DUK_D(DUK_DPRINT("mark-and-sweep sweep objects (non-string): %ld freed, %ld kept, %ld rescued, %ld queued for finalization",
(long) count_free, (long) count_keep, (long) count_rescue, (long) count_finalize));
#endif
*out_count_keep = count_keep;
}
/*
* Run (object) finalizers in the "to be finalized" work list.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
DUK_LOCAL void duk__run_object_finalizers(duk_heap *heap, duk_small_uint_t flags) {
duk_heaphdr *curr;
duk_heaphdr *next;
#if defined(DUK_USE_DEBUG)
duk_size_t count = 0;
#endif
duk_hthread *thr;
DUK_DD(DUK_DDPRINT("duk__run_object_finalizers: %p", (void *) heap));
thr = duk__get_temp_hthread(heap);
DUK_ASSERT(thr != NULL);
curr = heap->finalize_list;
while (curr) {
DUK_DDD(DUK_DDDPRINT("mark-and-sweep finalize: %p", (void *) curr));
DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT); /* only objects have finalizers */
DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(curr)); /* flags have already been cleared */
DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(curr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(curr)); /* No finalizers for ROM objects */
/* Keep heap->finalize_list up-to-date during the list walk.
* This has no functional impact, but does matter e.g. for
* duk_push_heapptr() asserts when assertions are enabled.
*/
heap->finalize_list = curr;
if (DUK_LIKELY((flags & DUK_MS_FLAG_SKIP_FINALIZERS) == 0)) {
/* Run the finalizer, duk_hobject_run_finalizer() sets FINALIZED.
* Next mark-and-sweep will collect the object unless it has
* become reachable (i.e. rescued). FINALIZED prevents the
* finalizer from being executed again before that.
*/
duk_hobject_run_finalizer(thr, (duk_hobject *) curr); /* must never longjmp */
DUK_ASSERT(DUK_HEAPHDR_HAS_FINALIZED(curr));
/* XXX: could clear FINALIZED already here; now cleared in
* next mark-and-sweep.
*/
} else {
/* Used during heap destruction: don't actually run finalizers
* because we're heading into forced finalization. Instead,
* queue finalizable objects back to the heap_allocated list.
*/
DUK_D(DUK_DPRINT("skip finalizers flag set, queue object to heap_allocated without finalizing"));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
}
/* queue back to heap_allocated */
next = DUK_HEAPHDR_GET_NEXT(heap, curr);
DUK_HEAP_INSERT_INTO_HEAP_ALLOCATED(heap, curr);
curr = next;
#if defined(DUK_USE_DEBUG)
count++;
#endif
}
/* finalize_list will always be processed completely */
heap->finalize_list = NULL;
#if defined(DUK_USE_DEBUG)
DUK_D(DUK_DPRINT("mark-and-sweep finalize objects: %ld finalizers called", (long) count));
#endif
}
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Object compaction.
*
* Compaction is assumed to never throw an error.
*/
DUK_LOCAL int duk__protected_compact_object(duk_context *ctx, void *udata) {
duk_hobject *obj;
/* XXX: for threads, compact value stack, call stack, catch stack? */
DUK_UNREF(udata);
obj = duk_known_hobject(ctx, -1);
duk_hobject_compact_props((duk_hthread *) ctx, obj);
return 0;
}
#if defined(DUK_USE_DEBUG)
DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start, duk_size_t *p_count_check, duk_size_t *p_count_compact, duk_size_t *p_count_bytes_saved) {
#else
DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start) {
#endif
duk_heaphdr *curr;
#if defined(DUK_USE_DEBUG)
duk_size_t old_size, new_size;
#endif
duk_hobject *obj;
DUK_UNREF(heap);
curr = start;
while (curr) {
DUK_DDD(DUK_DDDPRINT("mark-and-sweep compact: %p", (void *) curr));
if (DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_OBJECT) {
goto next;
}
obj = (duk_hobject *) curr;
#if defined(DUK_USE_DEBUG)
old_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
DUK_HOBJECT_GET_ASIZE(obj),
DUK_HOBJECT_GET_HSIZE(obj));
#endif
DUK_DD(DUK_DDPRINT("compact object: %p", (void *) obj));
duk_push_hobject((duk_context *) thr, obj);
/* XXX: disable error handlers for duration of compaction? */
duk_safe_call((duk_context *) thr, duk__protected_compact_object, NULL, 1, 0);
#if defined(DUK_USE_DEBUG)
new_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
DUK_HOBJECT_GET_ASIZE(obj),
DUK_HOBJECT_GET_HSIZE(obj));
#endif
#if defined(DUK_USE_DEBUG)
(*p_count_compact)++;
(*p_count_bytes_saved) += (duk_size_t) (old_size - new_size);
#endif
next:
curr = DUK_HEAPHDR_GET_NEXT(heap, curr);
#if defined(DUK_USE_DEBUG)
(*p_count_check)++;
#endif
}
}
DUK_LOCAL void duk__compact_objects(duk_heap *heap) {
/* XXX: which lists should participate? to be finalized? */
#if defined(DUK_USE_DEBUG)
duk_size_t count_check = 0;
duk_size_t count_compact = 0;
duk_size_t count_bytes_saved = 0;
#endif
duk_hthread *thr;
DUK_DD(DUK_DDPRINT("duk__compact_objects: %p", (void *) heap));
thr = duk__get_temp_hthread(heap);
DUK_ASSERT(thr != NULL);
#if defined(DUK_USE_DEBUG)
duk__compact_object_list(heap, thr, heap->heap_allocated, &count_check, &count_compact, &count_bytes_saved);
duk__compact_object_list(heap, thr, heap->finalize_list, &count_check, &count_compact, &count_bytes_saved);
#if defined(DUK_USE_REFERENCE_COUNTING)
duk__compact_object_list(heap, thr, heap->refzero_list, &count_check, &count_compact, &count_bytes_saved);
#endif
#else
duk__compact_object_list(heap, thr, heap->heap_allocated);
duk__compact_object_list(heap, thr, heap->finalize_list);
#if defined(DUK_USE_REFERENCE_COUNTING)
duk__compact_object_list(heap, thr, heap->refzero_list);
#endif
#endif
#if defined(DUK_USE_DEBUG)
DUK_D(DUK_DPRINT("mark-and-sweep compact objects: %ld checked, %ld compaction attempts, %ld bytes saved by compaction",
(long) count_check, (long) count_compact, (long) count_bytes_saved));
#endif
}
/*
* Assertion helpers.
*/
#if defined(DUK_USE_ASSERTIONS)
DUK_LOCAL void duk__assert_heaphdr_flags(duk_heap *heap) {
duk_heaphdr *hdr;
hdr = heap->heap_allocated;
while (hdr) {
DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
/* may have FINALIZED */
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
#if defined(DUK_USE_REFERENCE_COUNTING)
hdr = heap->refzero_list;
while (hdr) {
DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
/* DUK_HEAPHDR_HAS_FINALIZED may be set if we're doing a
* refzero finalization and mark-and-sweep gets triggered
* during the finalizer.
*/
/* DUK_HEAPHDR_HAS_FINALIZED may or may not be set. */
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
#endif /* DUK_USE_REFERENCE_COUNTING */
}
#if defined(DUK_USE_REFERENCE_COUNTING)
DUK_LOCAL void duk__assert_valid_refcounts(duk_heap *heap) {
duk_heaphdr *hdr = heap->heap_allocated;
while (hdr) {
if (DUK_HEAPHDR_GET_REFCOUNT(hdr) == 0 &&
DUK_HEAPHDR_HAS_FINALIZED(hdr)) {
/* An object may be in heap_allocated list with a zero
* refcount if it has just been finalized and is waiting
* to be collected by the next cycle.
*/
} else if (DUK_HEAPHDR_GET_REFCOUNT(hdr) == 0) {
/* An object may be in heap_allocated list with a zero
* refcount also if it is a temporary object created by
* a finalizer; because finalization now runs inside
* mark-and-sweep, such objects will not be queued to
* refzero_list and will thus appear here with refcount
* zero.
*/
#if 0 /* this case can no longer occur because refcount is unsigned */
} else if (DUK_HEAPHDR_GET_REFCOUNT(hdr) < 0) {
DUK_D(DUK_DPRINT("invalid refcount: %ld, %p -> %!O",
(hdr != NULL ? (long) DUK_HEAPHDR_GET_REFCOUNT(hdr) : (long) 0),
(void *) hdr, (duk_heaphdr *) hdr));
DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(hdr) > 0);
#endif
}
hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
}
}
DUK_LOCAL void duk__clear_assert_refcounts(duk_heap *heap) {
duk_heaphdr *curr;
duk_uint32_t i;
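/* Zero the comparison refcounts on all object lists and the string
* table; duk__mark_heaphdr() recounts actual references during the
* mark phase.
*/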
for (curr = heap->heap_allocated; curr != NULL; curr = DUK_HEAPHDR_GET_NEXT(heap, curr)) {
curr->h_assert_refcount = 0;
}
for (curr = heap->finalize_list; curr != NULL; curr = DUK_HEAPHDR_GET_NEXT(heap, curr)) {
curr->h_assert_refcount = 0;
}
for (curr = heap->refzero_list; curr != NULL; curr = DUK_HEAPHDR_GET_NEXT(heap, curr)) {
curr->h_assert_refcount = 0;
}
for (i = 0; i < heap->st_size; i++) {
duk_hstring *h;
#if defined(DUK_USE_STRTAB_PTRCOMP)
h = DUK_USE_HEAPPTR_DEC16(heap->heap_udata, heap->strtable16[i]);
#else
h = heap->strtable[i];
#endif
while (h != NULL) {
((duk_heaphdr *) h)->h_assert_refcount = 0;
h = h->hdr.h_next;
}
}
}
DUK_LOCAL void duk__check_refcount_heaphdr(duk_heaphdr *hdr) {
duk_bool_t count_ok;
/* The refcount check only makes sense for reachable objects on
* heap_allocated or string table, after the sweep phase. Prior to
* sweep phase refcounts will include references that are not visible
* via reachability roots.
*
* Because we're called after the sweep phase, all heap objects on
* heap_allocated are reachable. REACHABLE flags have already been
* cleared so we can't check them.
*/
/* ROM objects have intentionally incorrect refcount (1), but we won't
* check them.
*/
DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(hdr));
count_ok = ((duk_size_t) DUK_HEAPHDR_GET_REFCOUNT(hdr) == hdr->h_assert_refcount);
if (!count_ok) {
DUK_D(DUK_DPRINT("refcount mismatch for: %p: header=%ld counted=%ld --> %!iO",
(void *) hdr, (long) DUK_HEAPHDR_GET_REFCOUNT(hdr),
(long) hdr->h_assert_refcount, hdr));
DUK_ASSERT(0);
}
}
DUK_LOCAL void duk__check_assert_refcounts(duk_heap *heap) {
duk_heaphdr *curr;
duk_uint32_t i;
for (curr = heap->heap_allocated; curr != NULL; curr = DUK_HEAPHDR_GET_NEXT(heap, curr)) {
duk__check_refcount_heaphdr(curr);
}
for (i = 0; i < heap->st_size; i++) {
duk_hstring *h;
#if defined(DUK_USE_STRTAB_PTRCOMP)
h = DUK_USE_HEAPPTR_DEC16(heap->heap_udata, heap->strtable16[i]);
#else
h = heap->strtable[i];
#endif
while (h != NULL) {
duk__check_refcount_heaphdr((duk_heaphdr *) h);
h = h->hdr.h_next;
}
}
}
#endif /* DUK_USE_REFERENCE_COUNTING */
#endif /* DUK_USE_ASSERTIONS */
/*
* Finalizer torture. Do one fake finalizer call which causes side effects
* similar to one or more finalizers on actual objects.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
#if defined(DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE)
DUK_LOCAL duk_ret_t duk__markandsweep_fake_finalizer(duk_context *ctx) {
DUK_D(DUK_DPRINT("fake mark-and-sweep torture finalizer executed"));
/* Require a lot of stack to force a value stack grow/shrink.
* Recursive mark-and-sweep is prevented by allocation macros
* so this won't trigger another mark-and-sweep.
*/
duk_require_stack(ctx, 100000);
/* XXX: do something to force a callstack grow/shrink, perhaps
* just a manual forced resize or a forced relocating realloc?
*/
return 0;
}
DUK_LOCAL void duk__markandsweep_torture_finalizer(duk_hthread *thr) {
duk_context *ctx;
duk_int_t rc;
DUK_ASSERT(thr != NULL);
ctx = (duk_context *) thr;
/* Avoid fake finalization when callstack limit has been reached.
* Otherwise a callstack limit error will be created, then refzero'ed.
*/
if (thr->heap->call_recursion_depth >= thr->heap->call_recursion_limit ||
thr->callstack_size + 2 * DUK_CALLSTACK_GROW_STEP >= thr->callstack_max /*approximate*/) {
DUK_D(DUK_DPRINT("call recursion depth reached, avoid fake mark-and-sweep torture finalizer"));
return;
}
/* Run fake finalizer. Avoid creating unnecessary garbage. */
duk_push_c_function(ctx, duk__markandsweep_fake_finalizer, 0 /*nargs*/);
rc = duk_pcall(ctx, 0 /*nargs*/);
DUK_UNREF(rc); /* ignored */
duk_pop(ctx);
}
#endif /* DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE */
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Main mark-and-sweep function.
*
* 'flags' represents the features requested by the caller. The current
* heap->mark_and_sweep_base_flags is ORed automatically into the flags;
* the base flags mask typically prevents certain mark-and-sweep operations
* to avoid trouble.
*/
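/* Example entry points (a sketch; the call sites live outside this
* file): the public duk_gc() API and voluntary/emergency GC triggered
* from the allocation primitives both end up calling this function:
*
*   duk_gc(ctx, 0);   eventually invokes duk_heap_mark_and_sweep(heap, flags)
*/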
DUK_INTERNAL duk_bool_t duk_heap_mark_and_sweep(duk_heap *heap, duk_small_uint_t flags) {
duk_hthread *thr;
duk_size_t count_keep_obj;
duk_size_t count_keep_str;
#if defined(DUK_USE_VOLUNTARY_GC)
duk_size_t tmp;
#endif
if (DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
DUK_D(DUK_DPRINT("refuse to do a recursive mark-and-sweep"));
return 0;
}
/* XXX: thread selection for mark-and-sweep is currently a hack.
* If we don't have a thread, the entire mark-and-sweep is now
* skipped (although we could just skip finalizations).
*/
/* If thr != NULL, the thread may still be in the middle of
* initialization.
* XXX: Improve the thread viability test.
*/
thr = duk__get_temp_hthread(heap);
if (thr == NULL) {
DUK_D(DUK_DPRINT("gc skipped because we don't have a temp thread"));
/* reset voluntary gc trigger count */
#if defined(DUK_USE_VOLUNTARY_GC)
heap->mark_and_sweep_trigger_counter = DUK_HEAP_MARK_AND_SWEEP_TRIGGER_SKIP;
#endif
return 0; /* OK */
}
/* If debugger is paused, garbage collection is disabled by default. */
/* XXX: will need a force flag if garbage collection is triggered
* explicitly during paused state.
*/
#if defined(DUK_USE_DEBUGGER_SUPPORT)
if (DUK_HEAP_IS_PAUSED(heap)) {
/* Checking this here rather than in memory alloc primitives
* reduces checking code there but means a failed allocation
* will go through a few retries before giving up. That's
* fine because this only happens during debugging.
*/
DUK_D(DUK_DPRINT("gc skipped because debugger is paused"));
return 0;
}
#endif
DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) starting, requested flags: 0x%08lx, effective flags: 0x%08lx",
(unsigned long) flags, (unsigned long) (flags | heap->mark_and_sweep_base_flags)));
flags |= heap->mark_and_sweep_base_flags;
/*
* Assertions before
*/
#if defined(DUK_USE_ASSERTIONS)
DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap));
DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
DUK_ASSERT(heap->mark_and_sweep_recursion_depth == 0);
duk__assert_heaphdr_flags(heap);
#if defined(DUK_USE_REFERENCE_COUNTING)
/* Note: heap->refzero_free_running may be true; a refcount
* finalizer may trigger a mark-and-sweep.
*/
duk__assert_valid_refcounts(heap);
#endif /* DUK_USE_REFERENCE_COUNTING */
#endif /* DUK_USE_ASSERTIONS */
/*
* Begin
*/
DUK_HEAP_SET_MARKANDSWEEP_RUNNING(heap);
/*
* Mark roots, hoping that recursion limit is not normally hit.
* If recursion limit is hit, run additional reachability rounds
* starting from "temproots" until marking is complete.
*
* Marking happens in two phases: first we mark actual reachability
* roots (and run "temproots" to complete the process). Then we
* check which objects are unreachable and are finalizable; such
	 * objects are marked FINALIZABLE and marked as reachability roots
* (and "temproots" is run again to complete the process).
*
* The heap finalize_list must also be marked as a reachability root.
* There may be objects on the list from a previous round if the
	 * previous run had the finalizer skip flag set.
*/
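	/* Conceptual sketch of the "temproots" fixpoint used below (see
	 * duk__mark_temproots_by_heap_scan()): when the mark recursion limit
	 * is hit, an object is flagged TEMPROOT instead of being descended
	 * into, and the heap is rescanned until no TEMPROOT flags remain:
	 *
	 *     do {
	 *         rescan = 0;
	 *         for (each h in heap_allocated) {
	 *             if (TEMPROOT(h)) { clear flag; mark children; rescan = 1; }
	 *         }
	 *     } while (rescan);
	 */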
#if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
duk__clear_assert_refcounts(heap);
#endif
duk__mark_roots_heap(heap); /* main reachability roots */
#if defined(DUK_USE_REFERENCE_COUNTING)
duk__mark_refzero_list(heap); /* refzero_list treated as reachability roots */
#endif
duk__mark_temproots_by_heap_scan(heap); /* temproots */
#if defined(DUK_USE_FINALIZER_SUPPORT)
duk__mark_finalizable(heap); /* mark finalizable as reachability roots */
duk__mark_finalize_list(heap); /* mark finalizer work list as reachability roots */
#endif
duk__mark_temproots_by_heap_scan(heap); /* temproots */
/*
* Sweep garbage and remove marking flags, and move objects with
* finalizers to the finalizer work list.
*
* Objects to be swept need to get their refcounts finalized before
	 * they are swept.  In other words, the refcounts of the objects
	 * they reference need to be decreased.  This has to be done before
	 * freeing any objects to avoid decref'ing dangling pointers (which
	 * may happen even without bugs, e.g. with reference loops).
*
* Because strings don't point to other heap objects, similar
* finalization is not necessary for strings.
*/
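	/* Example of why this ordering matters: assume unreachable objects A
	 * and B forming a loop (A -> B and B -> A).  If A were freed before
	 * B's outgoing references were decref'd, freeing B would decref the
	 * already freed A.  duk__finalize_refcounts() therefore decreases the
	 * refcounts of all objects referenced by to-be-swept objects before
	 * any of them is freed.
	 */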
/* XXX: more emergency behavior, e.g. find smaller hash sizes etc */
#if defined(DUK_USE_REFERENCE_COUNTING)
duk__finalize_refcounts(heap);
#endif
duk__sweep_heap(heap, flags, &count_keep_obj);
duk__sweep_stringtable(heap, &count_keep_str);
#if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
duk__check_assert_refcounts(heap);
#endif
#if defined(DUK_USE_REFERENCE_COUNTING)
duk__clear_refzero_list_flags(heap);
#endif
#if defined(DUK_USE_FINALIZER_SUPPORT)
duk__clear_finalize_list_flags(heap);
#endif
/*
* Object compaction (emergency only).
*
* Object compaction is a separate step after sweeping, as there is
* more free memory for it to work with. Also, currently compaction
* may insert new objects into the heap allocated list and the string
	 * table, which we don't want to do during a sweep (the reachability
* flags of such objects would be incorrect). The objects inserted
* are currently:
*
* - a temporary duk_hbuffer for a new properties allocation
* - if array part is abandoned, string keys are interned
*
* The object insertions go to the front of the list, so they do not
* cause an infinite loop (they are not compacted).
*/
if ((flags & DUK_MS_FLAG_EMERGENCY) &&
!(flags & DUK_MS_FLAG_NO_OBJECT_COMPACTION)) {
duk__compact_objects(heap);
}
/*
* String table resize check.
*
* This is mainly useful in emergency GC: if the string table load
* factor is really low for some reason, we can shrink the string
* table to a smaller size and free some memory in the process.
* Only execute in emergency GC. String table has internal flags
* to protect against recursive resizing if this mark-and-sweep pass
* was triggered by a string table resize.
*/
if (flags & DUK_MS_FLAG_EMERGENCY) {
DUK_D(DUK_DPRINT("stringtable resize check in emergency gc"));
duk_heap_strtable_force_resize(heap);
}
/*
* Finalize objects in the finalization work list. Finalized
* objects are queued back to heap_allocated with FINALIZED set.
*
* Since finalizers may cause arbitrary side effects, they are
* prevented during string table and object property allocation
* resizing using the DUK_MS_FLAG_NO_FINALIZERS flag in
* heap->mark_and_sweep_base_flags. In this case the objects
* remain in the finalization work list after mark-and-sweep
* exits and they may be finalized on the next pass.
*
* Finalization currently happens inside "MARKANDSWEEP_RUNNING"
* protection (no mark-and-sweep may be triggered by the
* finalizers). As a side effect:
*
* 1) an out-of-memory error inside a finalizer will not
* cause a mark-and-sweep and may cause the finalizer
* to fail unnecessarily
*
* 2) any temporary objects whose refcount decreases to zero
* during finalization will not be put into refzero_list;
* they can only be collected by another mark-and-sweep
*
* This is not optimal, but since the sweep for this phase has
* already happened, this is probably good enough for now.
*/
#if defined(DUK_USE_FINALIZER_SUPPORT)
#if defined(DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE)
/* Cannot simulate individual finalizers because finalize_list only
* contains objects with actual finalizers. But simulate side effects
* from finalization by doing a bogus function call and resizing the
* stacks.
*/
if (flags & DUK_MS_FLAG_NO_FINALIZERS) {
DUK_D(DUK_DPRINT("skip mark-and-sweep torture finalizer, DUK_MS_FLAG_NO_FINALIZERS is set"));
	} else if (thr->valstack == NULL || thr->callstack == NULL || thr->catchstack == NULL) {
DUK_D(DUK_DPRINT("skip mark-and-sweep torture finalizer, thread not yet viable"));
} else {
DUK_D(DUK_DPRINT("run mark-and-sweep torture finalizer"));
duk__markandsweep_torture_finalizer(thr);
}
#endif /* DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE */
if (flags & DUK_MS_FLAG_NO_FINALIZERS) {
DUK_D(DUK_DPRINT("finalizer run skipped because DUK_MS_FLAG_NO_FINALIZERS is set"));
} else {
duk__run_object_finalizers(heap, flags);
}
#endif /* DUK_USE_FINALIZER_SUPPORT */
/*
* Finish
*/
DUK_HEAP_CLEAR_MARKANDSWEEP_RUNNING(heap);
/*
* Assertions after
*/
#if defined(DUK_USE_ASSERTIONS)
DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap));
DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
DUK_ASSERT(heap->mark_and_sweep_recursion_depth == 0);
duk__assert_heaphdr_flags(heap);
#if defined(DUK_USE_REFERENCE_COUNTING)
/* Note: heap->refzero_free_running may be true; a refcount
* finalizer may trigger a mark-and-sweep.
*/
duk__assert_valid_refcounts(heap);
#endif /* DUK_USE_REFERENCE_COUNTING */
#endif /* DUK_USE_ASSERTIONS */
/*
* Reset trigger counter
*/
#if defined(DUK_USE_VOLUNTARY_GC)
tmp = (count_keep_obj + count_keep_str) / 256;
heap->mark_and_sweep_trigger_counter = (duk_int_t) (
(tmp * DUK_HEAP_MARK_AND_SWEEP_TRIGGER_MULT) +
DUK_HEAP_MARK_AND_SWEEP_TRIGGER_ADD);
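	/* Worked example with hypothetical constants (actual values are
	 * build-dependent): if 5120 objects+strings are kept and
	 * DUK_HEAP_MARK_AND_SWEEP_TRIGGER_MULT is 256 with
	 * DUK_HEAP_MARK_AND_SWEEP_TRIGGER_ADD 1024, then tmp = 5120 / 256 = 20
	 * and the trigger counter resets to 20 * 256 + 1024 = 6144, i.e.
	 * roughly that many allocation operations before the next voluntary GC.
	 */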
DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, trigger reset to %ld",
(long) count_keep_obj, (long) count_keep_str, (long) heap->mark_and_sweep_trigger_counter));
#else
DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, no voluntary trigger",
(long) count_keep_obj, (long) count_keep_str));
#endif
return 0; /* OK */
}