Browse Source

Merge pull request #1537 from svaarala/lazy-valstack-shrink-cleanups

Minor cleanups to value stack grow/shrink
pull/1539/head
Sami Vaarala 8 years ago
committed by GitHub
parent
commit
c84f18d17f
  1. 5
      RELEASES.rst
  2. 2
      src-input/duk_api_internal.h
  3. 86
      src-input/duk_api_stack.c
  4. 24
      src-input/duk_js_call.c
  5. 6
      src-input/duk_js_executor.c
  6. 31
      tests/api/test-check-require-stack-top.c
  7. 31
      tests/api/test-check-require-stack.c

5
RELEASES.rst

@ -2868,6 +2868,11 @@ Planned
"collapse" bound function chains so that the target of a duk_hboundfunc is
always a non-bound function (GH-1503)
* Make call stack and value stack limits configurable via config options
(DUK_USE_CALLSTACK_LIMIT, DUK_USE_VALSTACK_LIMIT) (GH-1526)
* Add a wrap check to duk_{check,require}_stack{_top}() (GH-1537)
* Fix Reflect.construct() handling for four or more arguments (GH-1517,
GH-1518)

2
src-input/duk_api_internal.h

@ -26,6 +26,8 @@ DUK_INTERNAL_DECL void duk_copy_tvals_incref(duk_hthread *thr, duk_tval *tv_dst,
DUK_INTERNAL_DECL duk_tval *duk_create_gap(duk_context *ctx, duk_idx_t idx_base, duk_idx_t count);
DUK_INTERNAL_DECL void duk_set_top_and_wipe(duk_context *ctx, duk_idx_t top, duk_idx_t idx_wipe_start);
DUK_INTERNAL_DECL void duk_dup_0(duk_context *ctx);
DUK_INTERNAL_DECL void duk_dup_1(duk_context *ctx);
DUK_INTERNAL_DECL void duk_dup_2(duk_context *ctx);

86
src-input/duk_api_stack.c

@ -492,6 +492,24 @@ DUK_EXTERNAL void duk_set_top(duk_context *ctx, duk_idx_t idx) {
}
}
/* Internal helper: set top to 'top', and set [idx_wipe_start,top[ to
 * 'undefined' (doing nothing if idx_wipe_start == top). Indices are
 * positive and within value stack reserve. This is used by call handling.
 */
DUK_INTERNAL void duk_set_top_and_wipe(duk_context *ctx, duk_idx_t top, duk_idx_t idx_wipe_start) {
duk_hthread *thr = (duk_hthread *) ctx;
DUK_ASSERT(top >= 0);
DUK_ASSERT(idx_wipe_start >= 0);
DUK_ASSERT(idx_wipe_start <= top);
DUK_ASSERT(thr->valstack_bottom + top <= thr->valstack_end);
DUK_ASSERT(thr->valstack_bottom + idx_wipe_start <= thr->valstack_end);
DUK_UNREF(thr); /* 'thr' is only referenced by the assertions above. */
/* Two-step set top: first clamp down to idx_wipe_start (dropping any
 * values in [idx_wipe_start,old_top[), then extend back up to 'top'.
 * duk_set_top() fills the newly exposed slots with 'undefined', which
 * performs the wipe without an explicit loop.
 */
duk_set_top(ctx, idx_wipe_start);
duk_set_top(ctx, top);
}
DUK_EXTERNAL duk_idx_t duk_get_top_index(duk_context *ctx) {
duk_hthread *thr = (duk_hthread *) ctx;
duk_idx_t ret;
@ -707,7 +725,7 @@ DUK_LOCAL DUK_COLD DUK_NOINLINE duk_bool_t duk__resize_valstack(duk_hthread *thr
return 1;
}
DUK_LOCAL DUK_COLD DUK_NOINLINE duk_bool_t duk__valstack_grow_check_resize(duk_hthread *thr, duk_size_t min_bytes, duk_bool_t throw_on_error) {
DUK_LOCAL DUK_COLD DUK_NOINLINE duk_bool_t duk__valstack_grow(duk_hthread *thr, duk_size_t min_bytes, duk_bool_t throw_on_error) {
duk_size_t min_size;
duk_size_t new_size;
@ -726,7 +744,7 @@ DUK_LOCAL DUK_COLD DUK_NOINLINE duk_bool_t duk__valstack_grow_check_resize(duk_h
new_size = min_size;
#endif
if (DUK_UNLIKELY(new_size > DUK_USE_VALSTACK_LIMIT)) {
if (DUK_UNLIKELY(new_size > DUK_USE_VALSTACK_LIMIT || new_size < min_size /*wrap*/)) {
/* Note: may be triggered even if minimal new_size would not reach the limit,
* plan limit accordingly.
*/
@ -766,7 +784,7 @@ DUK_INTERNAL DUK_INLINE void duk_valstack_grow_check_throw(duk_hthread *thr, duk
thr->valstack_end = tv;
return;
}
(void) duk__valstack_grow_check_resize(thr, min_bytes, 1 /*throw_on_error*/);
(void) duk__valstack_grow(thr, min_bytes, 1 /*throw_on_error*/);
}
/* Hot, inlined value stack grow check which doesn't throw. */
@ -781,7 +799,7 @@ DUK_INTERNAL DUK_INLINE duk_bool_t duk_valstack_grow_check_nothrow(duk_hthread *
thr->valstack_end = tv;
return 1;
}
return duk__valstack_grow_check_resize(thr, min_bytes, 0 /*throw_on_error*/);
return duk__valstack_grow(thr, min_bytes, 0 /*throw_on_error*/);
}
/* Value stack shrink check, called from mark-and-sweep. */
@ -859,11 +877,16 @@ DUK_EXTERNAL duk_bool_t duk_check_stack(duk_context *ctx, duk_idx_t extra) {
DUK_ASSERT_CTX_VALID(ctx);
DUK_ASSERT(thr != NULL);
if (DUK_UNLIKELY(extra < 0)) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
extra = 0;
if (DUK_UNLIKELY(extra < 0 || extra > DUK_USE_VALSTACK_LIMIT)) {
if (extra < 0) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
extra = 0;
} else {
/* Cause grow check to fail without wrapping arithmetic. */
extra = DUK_USE_VALSTACK_LIMIT;
}
}
min_new_bytes = (duk_size_t) ((duk_uint8_t *) thr->valstack_top - (duk_uint8_t *) thr->valstack) +
@ -878,11 +901,16 @@ DUK_EXTERNAL void duk_require_stack(duk_context *ctx, duk_idx_t extra) {
DUK_ASSERT_CTX_VALID(ctx);
DUK_ASSERT(thr != NULL);
if (DUK_UNLIKELY(extra < 0)) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
extra = 0;
if (DUK_UNLIKELY(extra < 0 || extra > DUK_USE_VALSTACK_LIMIT)) {
if (extra < 0) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
extra = 0;
} else {
/* Cause grow check to fail without wrapping arithmetic. */
extra = DUK_USE_VALSTACK_LIMIT;
}
}
min_new_bytes = (duk_size_t) ((duk_uint8_t *) thr->valstack_top - (duk_uint8_t *) thr->valstack) +
@ -897,11 +925,16 @@ DUK_EXTERNAL duk_bool_t duk_check_stack_top(duk_context *ctx, duk_idx_t top) {
DUK_ASSERT_CTX_VALID(ctx);
thr = (duk_hthread *) ctx;
if (DUK_UNLIKELY(top < 0)) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
top = 0;
if (DUK_UNLIKELY(top < 0 || top > DUK_USE_VALSTACK_LIMIT)) {
if (top < 0) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
top = 0;
} else {
/* Cause grow check to fail without wrapping arithmetic. */
top = DUK_USE_VALSTACK_LIMIT;
}
}
min_new_bytes = (duk_size_t) ((duk_uint8_t *) thr->valstack_bottom - (duk_uint8_t *) thr->valstack) +
@ -916,11 +949,16 @@ DUK_EXTERNAL void duk_require_stack_top(duk_context *ctx, duk_idx_t top) {
DUK_ASSERT_CTX_VALID(ctx);
thr = (duk_hthread *) ctx;
if (DUK_UNLIKELY(top < 0)) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
top = 0;
if (DUK_UNLIKELY(top < 0 || top > DUK_USE_VALSTACK_LIMIT)) {
if (top < 0) {
/* Clamping to zero makes the API more robust to calling code
* calculation errors.
*/
top = 0;
} else {
/* Cause grow check to fail without wrapping arithmetic. */
top = DUK_USE_VALSTACK_LIMIT;
}
}
min_new_bytes = (duk_size_t) ((duk_uint8_t *) thr->valstack_bottom - (duk_uint8_t *) thr->valstack) +

24
src-input/duk_js_call.c

@ -1696,10 +1696,8 @@ DUK_LOCAL void duk__handle_call_inner(duk_hthread *thr,
duk_valstack_grow_check_throw(ctx, vs_min_bytes);
act->reserve_byteoff = (duk_size_t) ((duk_uint8_t *) thr->valstack_end - (duk_uint8_t *) thr->valstack);
if (nregs >= 0) {
/* XXX: optimized operation for setting top and wiping */
DUK_ASSERT(nregs >= nargs);
duk_set_top(ctx, idx_func + 2 + nargs); /* clamp anything above nargs */
duk_set_top(ctx, idx_func + 2 + nregs); /* extend with undefined */
duk_set_top_and_wipe(ctx, idx_func + 2 + nregs, idx_func + 2 + nargs);
}
/*
@ -2490,9 +2488,9 @@ DUK_LOCAL void duk__handle_safe_call_shared(duk_hthread *thr,
* is empty in case of an initial Duktape.Thread.resume().
*
* The first thing to do here is to figure out whether an ecma-to-ecma
* call is actually possible. It's not always the case if the target is
* a bound function; the final function may be native. In that case,
* return an error so caller can fall back to a normal call path.
* call is actually possible. It's not always the case, e.g. if the final
* target function is native. In that case, return an error so caller can
* fall back to a normal call path.
*/
DUK_INTERNAL duk_bool_t duk_handle_ecma_call_setup(duk_hthread *thr,
@ -2702,9 +2700,15 @@ DUK_INTERNAL duk_bool_t duk_handle_ecma_call_setup(duk_hthread *thr,
duk_hthread_activation_unwind_reuse_norz(thr);
DUK_ASSERT(act == thr->callstack_curr);
/* Then reuse the unwound activation. */
/* XXX: We could restore the caller's value stack reserve
* here, as if we did an actual unwind-and-call. Without
* the restoration, value stack reserve may remain higher
* than would otherwise be possible until we return to a
* non-tailcall.
*/
/* Start filling in the activation */
/* Then reuse the unwound activation. */
DUK_ASSERT(act->parent == thr->callstack_curr);
act->cat = NULL;
act->var_env = NULL;
act->lex_env = NULL;
@ -2911,9 +2915,7 @@ DUK_INTERNAL duk_bool_t duk_handle_ecma_call_setup(duk_hthread *thr,
DUK_ASSERT(nregs >= 0);
DUK_ASSERT(nregs >= nargs);
/* XXX: optimized operation for setting top and wiping */
duk_set_top(ctx, idx_args + nargs); /* clamp anything above nargs */
duk_set_top(ctx, idx_args + nregs); /* extend with undefined */
duk_set_top_and_wipe(ctx, idx_args + nregs, idx_args + nargs);
/*
* Shift to new valstack_bottom.

6
src-input/duk_js_executor.c

@ -812,8 +812,7 @@ DUK_LOCAL void duk__reconfig_valstack_ecma_return(duk_hthread *thr) {
thr->valstack_bottom = (duk_tval *) (void *) ((duk_uint8_t *) thr->valstack + act->bottom_byteoff);
DUK_ASSERT(act->retval_byteoff >= act->bottom_byteoff);
clamp_top = (duk_idx_t) ((act->retval_byteoff - act->bottom_byteoff + sizeof(duk_tval)) / sizeof(duk_tval)); /* +1 = one retval */
duk_set_top((duk_context *) thr, clamp_top);
duk_set_top((duk_context *) thr, h_func->nregs);
duk_set_top_and_wipe((duk_context *) thr, h_func->nregs, clamp_top);
DUK_ASSERT((duk_uint8_t *) thr->valstack_end >= (duk_uint8_t *) thr->valstack + act->reserve_byteoff);
thr->valstack_end = (duk_tval *) (void *) ((duk_uint8_t *) thr->valstack + act->reserve_byteoff);
@ -843,8 +842,7 @@ DUK_LOCAL void duk__reconfig_valstack_ecma_catcher(duk_hthread *thr, duk_activat
idx_bottom = (duk_size_t) (thr->valstack_bottom - thr->valstack);
DUK_ASSERT(cat->idx_base >= idx_bottom);
clamp_top = (duk_idx_t) (cat->idx_base - idx_bottom + 2); /* +2 = catcher value, catcher lj_type */
duk_set_top((duk_context *) thr, clamp_top);
duk_set_top((duk_context *) thr, h_func->nregs);
duk_set_top_and_wipe((duk_context *) thr, h_func->nregs, clamp_top);
DUK_ASSERT((duk_uint8_t *) thr->valstack_end >= (duk_uint8_t *) thr->valstack + act->reserve_byteoff);
thr->valstack_end = (duk_tval *) (void *) ((duk_uint8_t *) thr->valstack + act->reserve_byteoff);

31
tests/api/test-check-require-stack-top.c

@ -12,6 +12,10 @@ final top: 1000
rc=0
final top: 0
==> rc=0, result='undefined'
*** check_4 (duk_safe_call)
rc=0
final top: 0
==> rc=0, result='undefined'
*** require_1 (duk_safe_call)
final top: 1000
==> rc=0, result='undefined'
@ -20,6 +24,8 @@ final top: 1000
==> rc=0, result='undefined'
*** require_3 (duk_safe_call)
==> rc=1, result='RangeError: valstack limit'
*** require_4 (duk_safe_call)
==> rc=1, result='RangeError: valstack limit'
===*/
/* demonstrate how pushing too many elements causes an error */
@ -102,6 +108,19 @@ static duk_ret_t check_3(duk_context *ctx, void *udata) {
return 0;
}
/* Ask for DUK_IDX_MAX extra entries above the current bottom; the value
 * is large enough that a naive byte-size computation would wrap.  The
 * wrap check must make the call fail cleanly (rc == 0) and leave the
 * value stack top unchanged.
 */
static duk_ret_t check_4(duk_context *ctx, void *udata) {
	duk_ret_t rc = duk_check_stack_top(ctx, DUK_IDX_MAX);

	(void) udata;
	printf("rc=%d\n", (int) rc); /* expect 0: the grow check fails */
	printf("final top: %ld\n", (long) duk_get_top(ctx));
	return 0;
}
/* same as check_1 but with duk_require_stack_top() */
static duk_ret_t require_1_inner(duk_context *ctx) {
int i;
@ -158,12 +177,24 @@ static duk_ret_t require_3(duk_context *ctx, void *udata) {
return 0;
}
/* Variant of check_4 exercising duk_require_stack_top(): instead of an
 * error return, the failing wrap-checked grow throws (RangeError per the
 * expected output), so the printf below is never reached.
 */
static duk_ret_t require_4(duk_context *ctx, void *udata) {
	long top_after;

	(void) udata;
	duk_require_stack_top(ctx, DUK_IDX_MAX);
	top_after = (long) duk_get_top(ctx);
	printf("final top: %ld\n", top_after);
	return 0;
}
/* Top-level test driver: each case runs via TEST_SAFE_CALL (a protected
 * call, per the "(duk_safe_call)" lines in the expected output), so an
 * error thrown by a case -- expected for require_3/require_4, which
 * produce 'RangeError: valstack limit' -- is reported rather than
 * aborting the test program.
 */
void test(duk_context *ctx) {
TEST_SAFE_CALL(test_1);
TEST_SAFE_CALL(check_1);
TEST_SAFE_CALL(check_2);
TEST_SAFE_CALL(check_3);
TEST_SAFE_CALL(check_4);
TEST_SAFE_CALL(require_1);
TEST_SAFE_CALL(require_2);
TEST_SAFE_CALL(require_3);
TEST_SAFE_CALL(require_4);
}

31
tests/api/test-check-require-stack.c

@ -12,6 +12,10 @@ final top: 1000
rc=0
final top: 0
==> rc=0, result='undefined'
*** check_4 (duk_safe_call)
rc=0
final top: 0
==> rc=0, result='undefined'
*** require_1 (duk_safe_call)
final top: 1000
==> rc=0, result='undefined'
@ -20,6 +24,8 @@ final top: 1000
==> rc=0, result='undefined'
*** require_3 (duk_safe_call)
==> rc=1, result='RangeError: valstack limit'
*** require_4 (duk_safe_call)
==> rc=1, result='RangeError: valstack limit'
===*/
/* demonstrate how pushing too many elements causes an error */
@ -102,6 +108,19 @@ static duk_ret_t check_3(duk_context *ctx, void *udata) {
return 0;
}
/* Ask for DUK_IDX_MAX extra entries; the value is large enough that a
 * naive byte-size computation would wrap.  The wrap check must make the
 * call fail cleanly (rc == 0) and leave the value stack top unchanged.
 */
static duk_ret_t check_4(duk_context *ctx, void *udata) {
	duk_ret_t rc = duk_check_stack(ctx, DUK_IDX_MAX);

	(void) udata;
	printf("rc=%d\n", (int) rc); /* expect 0: the grow check fails */
	printf("final top: %ld\n", (long) duk_get_top(ctx));
	return 0;
}
/* same as check_1 but with duk_require_stack() */
static duk_ret_t require_1_inner(duk_context *ctx) {
int i;
@ -157,12 +176,24 @@ static duk_ret_t require_3(duk_context *ctx, void *udata) {
return 0;
}
/* Variant of check_4 exercising duk_require_stack(): instead of an error
 * return, the failing wrap-checked grow throws (RangeError per the
 * expected output), so the printf below is never reached.
 */
static duk_ret_t require_4(duk_context *ctx, void *udata) {
	long top_after;

	(void) udata;
	duk_require_stack(ctx, DUK_IDX_MAX);
	top_after = (long) duk_get_top(ctx);
	printf("final top: %ld\n", top_after);
	return 0;
}
/* Top-level test driver: each case runs via TEST_SAFE_CALL (a protected
 * call, per the "(duk_safe_call)" lines in the expected output), so an
 * error thrown by a case -- expected for require_3/require_4, which
 * produce 'RangeError: valstack limit' -- is reported rather than
 * aborting the test program.
 */
void test(duk_context *ctx) {
TEST_SAFE_CALL(test_1);
TEST_SAFE_CALL(check_1);
TEST_SAFE_CALL(check_2);
TEST_SAFE_CALL(check_3);
TEST_SAFE_CALL(check_4);
TEST_SAFE_CALL(require_1);
TEST_SAFE_CALL(require_2);
TEST_SAFE_CALL(require_3);
TEST_SAFE_CALL(require_4);
}

Loading…
Cancel
Save