Browse Source

py: Put all global state together in state structures.

This patch consolidates all global variables in py/ core into one place,
in a global structure.  Root pointers are all located together to make
GC tracing easier and more efficient.
pull/1049/head
Damien George 10 years ago
parent
commit
b4b10fd350
  1. 3
      bare-arm/main.c
  2. 3
      esp8266/main.c
  3. 1
      py/builtin.h
  4. 3
      py/emitbc.c
  5. 161
      py/gc.c
  6. 5
      py/gc.h
  7. 5
      py/lexer.c
  8. 2
      py/lexer.h
  9. 35
      py/malloc.c
  10. 7
      py/modgc.c
  11. 5
      py/mpconfig.h
  12. 29
      py/mpstate.c
  13. 155
      py/mpstate.h
  14. 8
      py/nlr.h
  15. 7
      py/nlrsetjmp.c
  16. 5
      py/nlrthumb.S
  17. 29
      py/nlrx64.S
  18. 7
      py/nlrx86.S
  19. 11
      py/nlrxtensa.S
  20. 34
      py/objexcept.c
  21. 38
      py/objexcept.h
  22. 19
      py/objmodule.c
  23. 1
      py/py.mk
  24. 37
      py/qstr.c
  25. 8
      py/qstr.h
  26. 69
      py/runtime.c
  27. 10
      py/runtime.h
  28. 14
      py/stackctrl.c
  29. 7
      py/vm.c
  30. 7
      stmhal/pendsv.c
  31. 3
      teensy/main.c
  32. 15
      unix/gccollect.c
  33. 15
      unix/main.c
  34. 3
      unix/mpconfigport.h

3
bare-arm/main.c

@ -5,8 +5,8 @@
#include "py/nlr.h"
#include "py/parsehelper.h"
#include "py/compile.h"
#include "py/runtime0.h"
#include "py/runtime.h"
#include "py/stackctrl.h"
#include "py/repl.h"
#include "py/pfenv.h"
@ -48,6 +48,7 @@ void do_str(const char *src) {
}
int main(int argc, char **argv) {
mp_stack_set_limit(10240);
mp_init();
do_str("print('hello world!', list(x+1 for x in range(10)), end='eol\n')");
mp_deinit();

3
esp8266/main.c

@ -32,6 +32,7 @@
#include "py/compile.h"
#include "py/runtime0.h"
#include "py/runtime.h"
#include "py/stackctrl.h"
#include "py/gc.h"
#include "pyexec.h"
#include "gccollect.h"
@ -39,7 +40,7 @@
void user_init(void) {
soft_reset:
//mp_stack_set_limit((char*)&_ram_end - (char*)&_heap_end - 1024);
mp_stack_set_limit(10240);
mp_hal_init();
gc_init(&_heap_start, &_heap_end);
gc_collect_init();

1
py/builtin.h

@ -90,7 +90,6 @@ extern const mp_obj_module_t mp_module_sys;
extern const mp_obj_module_t mp_module_gc;
extern const mp_obj_dict_t mp_module_builtins_globals;
extern mp_obj_dict_t *mp_module_builtins_override_dict;
struct _dummy_t;
extern struct _dummy_t mp_sys_stdin_obj;

3
py/emitbc.c

@ -30,6 +30,7 @@
#include <string.h>
#include <assert.h>
#include "py/mpstate.h"
#include "py/emit.h"
#include "py/bc0.h"
@ -383,7 +384,7 @@ STATIC void emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta) {
STATIC void emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
//printf("source: line %d -> %d offset %d -> %d\n", emit->last_source_line, source_line, emit->last_source_line_offset, emit->bytecode_offset);
#if MICROPY_ENABLE_SOURCE_LINE
if (mp_optimise_value >= 3) {
if (MP_STATE_VM(mp_optimise_value) >= 3) {
// If we compile with -O3, don't store line numbers.
return;
}

161
py/gc.c

@ -28,6 +28,7 @@
#include <stdio.h>
#include <string.h>
#include "py/mpstate.h"
#include "py/gc.h"
#include "py/obj.h"
#include "py/runtime.h"
@ -48,25 +49,6 @@
#define WORDS_PER_BLOCK (4)
#define BYTES_PER_BLOCK (WORDS_PER_BLOCK * BYTES_PER_WORD)
STATIC byte *gc_alloc_table_start;
STATIC mp_uint_t gc_alloc_table_byte_len;
#if MICROPY_ENABLE_FINALISER
STATIC byte *gc_finaliser_table_start;
#endif
// We initialise gc_pool_start to a dummy value so it stays out of the bss
// section. This makes sure we don't trace this pointer in a collect cycle.
// If we did trace it, it would make the first block of the heap always
// reachable, and hence we can never free that block.
STATIC mp_uint_t *gc_pool_start = (void*)4;
STATIC mp_uint_t *gc_pool_end;
STATIC int gc_stack_overflow;
STATIC mp_uint_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
STATIC mp_uint_t *gc_sp;
STATIC uint16_t gc_lock_depth;
uint16_t gc_auto_collect_enabled;
STATIC mp_uint_t gc_last_free_atb_index;
// ATB = allocation table byte
// 0b00 = FREE -- free block
// 0b01 = HEAD -- head of a chain of blocks
@ -90,15 +72,15 @@ STATIC mp_uint_t gc_last_free_atb_index;
#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
#define ATB_GET_KIND(block) ((gc_alloc_table_start[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
#define ATB_ANY_TO_FREE(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
#define ATB_FREE_TO_HEAD(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
#define ATB_FREE_TO_TAIL(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
#define ATB_HEAD_TO_MARK(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
#define ATB_MARK_TO_HEAD(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
#define BLOCK_FROM_PTR(ptr) (((ptr) - (mp_uint_t)gc_pool_start) / BYTES_PER_BLOCK)
#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (mp_uint_t)gc_pool_start))
#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
#define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
#define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
#define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
#define BLOCK_FROM_PTR(ptr) (((ptr) - (mp_uint_t)MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (mp_uint_t)MP_STATE_MEM(gc_pool_start)))
#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
#if MICROPY_ENABLE_FINALISER
@ -107,9 +89,9 @@ STATIC mp_uint_t gc_last_free_atb_index;
#define BLOCKS_PER_FTB (8)
#define FTB_GET(block) ((gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
#define FTB_SET(block) do { gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
#define FTB_CLEAR(block) do { gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
#define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
#define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
#define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
#endif
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
@ -125,67 +107,67 @@ void gc_init(void *start, void *end) {
// => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
mp_uint_t total_byte_len = (byte*)end - (byte*)start;
#if MICROPY_ENABLE_FINALISER
gc_alloc_table_byte_len = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
gc_alloc_table_byte_len = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
#endif
gc_alloc_table_start = (byte*)start;
MP_STATE_MEM(gc_alloc_table_start) = (byte*)start;
#if MICROPY_ENABLE_FINALISER
mp_uint_t gc_finaliser_table_byte_len = (gc_alloc_table_byte_len * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
gc_finaliser_table_start = gc_alloc_table_start + gc_alloc_table_byte_len;
mp_uint_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
#endif
mp_uint_t gc_pool_block_len = gc_alloc_table_byte_len * BLOCKS_PER_ATB;
gc_pool_start = (mp_uint_t*)((byte*)end - gc_pool_block_len * BYTES_PER_BLOCK);
gc_pool_end = (mp_uint_t*)end;
mp_uint_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
MP_STATE_MEM(gc_pool_start) = (mp_uint_t*)((byte*)end - gc_pool_block_len * BYTES_PER_BLOCK);
MP_STATE_MEM(gc_pool_end) = (mp_uint_t*)end;
#if MICROPY_ENABLE_FINALISER
assert((byte*)gc_pool_start >= gc_finaliser_table_start + gc_finaliser_table_byte_len);
assert((byte*)MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
#endif
// clear ATBs
memset(gc_alloc_table_start, 0, gc_alloc_table_byte_len);
memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));
#if MICROPY_ENABLE_FINALISER
// clear FTBs
memset(gc_finaliser_table_start, 0, gc_finaliser_table_byte_len);
memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
#endif
// set last free ATB index to start of heap
gc_last_free_atb_index = 0;
MP_STATE_MEM(gc_last_free_atb_index) = 0;
// unlock the GC
gc_lock_depth = 0;
MP_STATE_MEM(gc_lock_depth) = 0;
// allow auto collection
gc_auto_collect_enabled = 1;
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
DEBUG_printf("GC layout:\n");
DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_alloc_table_start, gc_alloc_table_byte_len, gc_alloc_table_byte_len * BLOCKS_PER_ATB);
DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
#if MICROPY_ENABLE_FINALISER
DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_finaliser_table_start, gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
#endif
DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_pool_start, gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}
void gc_lock(void) {
gc_lock_depth++;
MP_STATE_MEM(gc_lock_depth)++;
}
void gc_unlock(void) {
gc_lock_depth--;
MP_STATE_MEM(gc_lock_depth)--;
}
bool gc_is_locked(void) {
return gc_lock_depth != 0;
return MP_STATE_MEM(gc_lock_depth) != 0;
}
#define VERIFY_PTR(ptr) ( \
(ptr & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
&& ptr >= (mp_uint_t)gc_pool_start /* must be above start of pool */ \
&& ptr < (mp_uint_t)gc_pool_end /* must be below end of pool */ \
&& ptr >= (mp_uint_t)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
&& ptr < (mp_uint_t)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
)
#define VERIFY_MARK_AND_PUSH(ptr) \
@ -195,19 +177,19 @@ bool gc_is_locked(void) {
if (ATB_GET_KIND(_block) == AT_HEAD) { \
/* an unmarked head, mark it, and push it on gc stack */ \
ATB_HEAD_TO_MARK(_block); \
if (gc_sp < &gc_stack[MICROPY_ALLOC_GC_STACK_SIZE]) { \
*gc_sp++ = _block; \
if (MP_STATE_MEM(gc_sp) < &MP_STATE_MEM(gc_stack)[MICROPY_ALLOC_GC_STACK_SIZE]) { \
*MP_STATE_MEM(gc_sp)++ = _block; \
} else { \
gc_stack_overflow = 1; \
MP_STATE_MEM(gc_stack_overflow) = 1; \
} \
} \
} \
} while (0)
STATIC void gc_drain_stack(void) {
while (gc_sp > gc_stack) {
while (MP_STATE_MEM(gc_sp) > MP_STATE_MEM(gc_stack)) {
// pop the next block off the stack
mp_uint_t block = *--gc_sp;
mp_uint_t block = *--MP_STATE_MEM(gc_sp);
// work out number of consecutive blocks in the chain starting with this one
mp_uint_t n_blocks = 0;
@ -225,15 +207,15 @@ STATIC void gc_drain_stack(void) {
}
STATIC void gc_deal_with_stack_overflow(void) {
while (gc_stack_overflow) {
gc_stack_overflow = 0;
gc_sp = gc_stack;
while (MP_STATE_MEM(gc_stack_overflow)) {
MP_STATE_MEM(gc_stack_overflow) = 0;
MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
// scan entire memory looking for blocks which have been marked but not their children
for (mp_uint_t block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
for (mp_uint_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
// trace (again) if mark bit set
if (ATB_GET_KIND(block) == AT_MARK) {
*gc_sp++ = block;
*MP_STATE_MEM(gc_sp)++ = block;
gc_drain_stack();
}
}
@ -250,7 +232,7 @@ STATIC void gc_sweep(void) {
#endif
// free unmarked heads and their tails
int free_tail = 0;
for (mp_uint_t block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
for (mp_uint_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
switch (ATB_GET_KIND(block)) {
case AT_HEAD:
#if MICROPY_ENABLE_FINALISER
@ -292,8 +274,13 @@ STATIC void gc_sweep(void) {
void gc_collect_start(void) {
gc_lock();
gc_stack_overflow = 0;
gc_sp = gc_stack;
MP_STATE_MEM(gc_stack_overflow) = 0;
MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
// Trace root pointers. This relies on the root pointers being organised
// correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
// dict_globals, then the root pointer section of mp_state_vm.
void **ptrs = (void**)(void*)&mp_state_ctx;
gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t));
}
void gc_collect_root(void **ptrs, mp_uint_t len) {
@ -307,18 +294,18 @@ void gc_collect_root(void **ptrs, mp_uint_t len) {
void gc_collect_end(void) {
gc_deal_with_stack_overflow();
gc_sweep();
gc_last_free_atb_index = 0;
MP_STATE_MEM(gc_last_free_atb_index) = 0;
gc_unlock();
}
void gc_info(gc_info_t *info) {
info->total = (gc_pool_end - gc_pool_start) * sizeof(mp_uint_t);
info->total = (MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start)) * sizeof(mp_uint_t);
info->used = 0;
info->free = 0;
info->num_1block = 0;
info->num_2block = 0;
info->max_block = 0;
for (mp_uint_t block = 0, len = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
for (mp_uint_t block = 0, len = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
mp_uint_t kind = ATB_GET_KIND(block);
if (kind == AT_FREE || kind == AT_HEAD) {
if (len == 1) {
@ -361,7 +348,7 @@ void *gc_alloc(mp_uint_t n_bytes, bool has_finaliser) {
DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
// check if GC is locked
if (gc_lock_depth > 0) {
if (MP_STATE_MEM(gc_lock_depth) > 0) {
return NULL;
}
@ -374,12 +361,12 @@ void *gc_alloc(mp_uint_t n_bytes, bool has_finaliser) {
mp_uint_t end_block;
mp_uint_t start_block;
mp_uint_t n_free = 0;
int collected = !gc_auto_collect_enabled;
int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
for (;;) {
// look for a run of n_blocks available blocks
for (i = gc_last_free_atb_index; i < gc_alloc_table_byte_len; i++) {
byte a = gc_alloc_table_start[i];
for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
@ -407,7 +394,7 @@ found:
// before this one. Also, whenever we free or shrink a block we must check
// if this index needs adjusting (see gc_realloc and gc_free).
if (n_free == 1) {
gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB;
MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
}
// mark first block as used head
@ -420,7 +407,7 @@ found:
}
// get pointer to first block
void *ret_ptr = (void*)(gc_pool_start + start_block * WORDS_PER_BLOCK);
void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * WORDS_PER_BLOCK);
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
// zero out the additional bytes of the newly allocated blocks
@ -458,7 +445,7 @@ void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
// force the freeing of a piece of memory
void gc_free(void *ptr_in) {
if (gc_lock_depth > 0) {
if (MP_STATE_MEM(gc_lock_depth) > 0) {
// TODO how to deal with this error?
return;
}
@ -470,8 +457,8 @@ void gc_free(void *ptr_in) {
mp_uint_t block = BLOCK_FROM_PTR(ptr);
if (ATB_GET_KIND(block) == AT_HEAD) {
// set the last_free pointer to this block if it's earlier in the heap
if (block / BLOCKS_PER_ATB < gc_last_free_atb_index) {
gc_last_free_atb_index = block / BLOCKS_PER_ATB;
if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
}
// free head and all of its tail blocks
@ -540,7 +527,7 @@ void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
#else // Alternative gc_realloc impl
void *gc_realloc(void *ptr_in, mp_uint_t n_bytes) {
if (gc_lock_depth > 0) {
if (MP_STATE_MEM(gc_lock_depth) > 0) {
return NULL;
}
@ -581,7 +568,7 @@ void *gc_realloc(void *ptr_in, mp_uint_t n_bytes) {
// efficiently shrink it (see below for shrinking code).
mp_uint_t n_free = 0;
mp_uint_t n_blocks = 1; // counting HEAD block
mp_uint_t max_block = gc_alloc_table_byte_len * BLOCKS_PER_ATB;
mp_uint_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
for (mp_uint_t bl = block + n_blocks; bl < max_block; bl++) {
byte block_type = ATB_GET_KIND(bl);
if (block_type == AT_TAIL) {
@ -612,8 +599,8 @@ void *gc_realloc(void *ptr_in, mp_uint_t n_bytes) {
}
// set the last_free pointer to end of this block if it's earlier in the heap
if ((block + new_blocks) / BLOCKS_PER_ATB < gc_last_free_atb_index) {
gc_last_free_atb_index = (block + new_blocks) / BLOCKS_PER_ATB;
if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
}
#if EXTENSIVE_HEAP_PROFILING
@ -675,22 +662,22 @@ void gc_dump_alloc_table(void) {
#if !EXTENSIVE_HEAP_PROFILING
// When comparing heap output we don't want to print the starting
// pointer of the heap because it changes from run to run.
printf("GC memory layout; from %p:", gc_pool_start);
printf("GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start));
#endif
for (mp_uint_t bl = 0; bl < gc_alloc_table_byte_len * BLOCKS_PER_ATB; bl++) {
for (mp_uint_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) {
if (bl % DUMP_BYTES_PER_LINE == 0) {
// a new line of blocks
{
// check if this line contains only free blocks
mp_uint_t bl2 = bl;
while (bl2 < gc_alloc_table_byte_len * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
bl2++;
}
if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
// there are at least 2 lines containing only free blocks, so abbreviate their printing
printf("\n (" UINT_FMT " lines all free)", (bl2 - bl) / DUMP_BYTES_PER_LINE);
bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
if (bl >= gc_alloc_table_byte_len * BLOCKS_PER_ATB) {
if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) {
// got to end of heap
break;
}
@ -736,7 +723,7 @@ void gc_dump_alloc_table(void) {
*/
/* this prints the uPy object type of the head block */
case AT_HEAD: {
mp_uint_t *ptr = gc_pool_start + bl * WORDS_PER_BLOCK;
mp_uint_t *ptr = MP_STATE_MEM(gc_pool_start) + bl * WORDS_PER_BLOCK;
if (*ptr == (mp_uint_t)&mp_type_tuple) { c = 'T'; }
else if (*ptr == (mp_uint_t)&mp_type_list) { c = 'L'; }
else if (*ptr == (mp_uint_t)&mp_type_dict) { c = 'D'; }

5
py/gc.h

@ -39,11 +39,6 @@ void gc_lock(void);
void gc_unlock(void);
bool gc_is_locked(void);
// This variable controls auto garbage collection. If set to 0 then the
// GC won't automatically run when gc_alloc can't find enough blocks. But
// you can still allocate/free memory and also explicitly call gc_collect.
extern uint16_t gc_auto_collect_enabled;
// A given port must implement gc_collect by using the other collect functions.
void gc_collect(void);
void gc_collect_start(void);

5
py/lexer.c

@ -27,6 +27,7 @@
#include <stdio.h>
#include <assert.h>
#include "py/mpstate.h"
#include "py/lexer.h"
#define TAB_SIZE (8)
@ -34,8 +35,6 @@
// TODO seems that CPython allows NULL byte in the input stream
// don't know if that's intentional or not, but we don't allow it
mp_uint_t mp_optimise_value;
// TODO replace with a call to a standard function
STATIC bool str_strn_equal(const char *str, const char *strn, mp_uint_t len) {
mp_uint_t i = 0;
@ -662,7 +661,7 @@ STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, bool first_token) {
if (str_strn_equal(tok_kw[i], lex->vstr.buf, lex->vstr.len)) {
if (i == MP_ARRAY_SIZE(tok_kw) - 1) {
// tok_kw[MP_ARRAY_SIZE(tok_kw) - 1] == "__debug__"
lex->tok_kind = (mp_optimise_value == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
} else {
lex->tok_kind = MP_TOKEN_KW_FALSE + i;
}

2
py/lexer.h

@ -192,6 +192,4 @@ typedef enum {
mp_import_stat_t mp_import_stat(const char *path);
mp_lexer_t *mp_lexer_new_from_file(const char *filename);
extern mp_uint_t mp_optimise_value;
#endif // __MICROPY_INCLUDED_PY_LEXER_H__

35
py/malloc.c

@ -30,6 +30,7 @@
#include "py/mpconfig.h"
#include "py/misc.h"
#include "py/mpstate.h"
#if 0 // print debugging info
#define DEBUG_printf DEBUG_printf
@ -38,11 +39,7 @@
#endif
#if MICROPY_MEM_STATS
STATIC size_t total_bytes_allocated = 0;
STATIC size_t current_bytes_allocated = 0;
STATIC size_t peak_bytes_allocated = 0;
#define UPDATE_PEAK() { if (current_bytes_allocated > peak_bytes_allocated) peak_bytes_allocated = current_bytes_allocated; }
#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
#endif
#if MICROPY_ENABLE_GC
@ -68,8 +65,8 @@ void *m_malloc(size_t num_bytes) {
return m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
total_bytes_allocated += num_bytes;
current_bytes_allocated += num_bytes;
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@ -79,8 +76,8 @@ void *m_malloc(size_t num_bytes) {
void *m_malloc_maybe(size_t num_bytes) {
void *ptr = malloc(num_bytes);
#if MICROPY_MEM_STATS
total_bytes_allocated += num_bytes;
current_bytes_allocated += num_bytes;
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@ -94,8 +91,8 @@ void *m_malloc_with_finaliser(size_t num_bytes) {
return m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
total_bytes_allocated += num_bytes;
current_bytes_allocated += num_bytes;
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@ -124,8 +121,8 @@ void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
// allocated total. If we process only positive increments,
// we'll count 3K.
size_t diff = new_num_bytes - old_num_bytes;
total_bytes_allocated += diff;
current_bytes_allocated += diff;
MP_STATE_MEM(total_bytes_allocated) += diff;
MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
#endif
DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
@ -143,8 +140,8 @@ void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
// Also, don't count failed reallocs.
if (!(new_ptr == NULL && new_num_bytes != 0)) {
size_t diff = new_num_bytes - old_num_bytes;
total_bytes_allocated += diff;
current_bytes_allocated += diff;
MP_STATE_MEM(total_bytes_allocated) += diff;
MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
}
#endif
@ -155,21 +152,21 @@ void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
void m_free(void *ptr, size_t num_bytes) {
free(ptr);
#if MICROPY_MEM_STATS
current_bytes_allocated -= num_bytes;
MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
#endif
DEBUG_printf("free %p, %d\n", ptr, num_bytes);
}
#if MICROPY_MEM_STATS
size_t m_get_total_bytes_allocated(void) {
return total_bytes_allocated;
return MP_STATE_MEM(total_bytes_allocated);
}
size_t m_get_current_bytes_allocated(void) {
return current_bytes_allocated;
return MP_STATE_MEM(current_bytes_allocated);
}
size_t m_get_peak_bytes_allocated(void) {
return peak_bytes_allocated;
return MP_STATE_MEM(peak_bytes_allocated);
}
#endif

7
py/modgc.c

@ -24,6 +24,7 @@
* THE SOFTWARE.
*/
#include "py/mpstate.h"
#include "py/obj.h"
#include "py/gc.h"
@ -48,7 +49,7 @@ MP_DEFINE_CONST_FUN_OBJ_0(gc_collect_obj, py_gc_collect);
/// \function disable()
/// Disable the garbage collector.
STATIC mp_obj_t gc_disable(void) {
gc_auto_collect_enabled = 0;
MP_STATE_MEM(gc_auto_collect_enabled) = 0;
return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
@ -56,13 +57,13 @@ MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
/// \function enable()
/// Enable the garbage collector.
STATIC mp_obj_t gc_enable(void) {
gc_auto_collect_enabled = 1;
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_enable_obj, gc_enable);
STATIC mp_obj_t gc_isenabled(void) {
return MP_BOOL(gc_auto_collect_enabled);
return MP_BOOL(MP_STATE_MEM(gc_auto_collect_enabled));
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_isenabled_obj, gc_isenabled);

5
py/mpconfig.h

@ -488,6 +488,11 @@ typedef double mp_float_t;
#define MICROPY_PORT_CONSTANTS
#endif
// Any root pointers for GC scanning - see mpstate.c
#ifndef MICROPY_PORT_ROOT_POINTERS
#define MICROPY_PORT_ROOT_POINTERS
#endif
/*****************************************************************************/
/* Miscellaneous settings */

29
py/mpstate.c

@ -0,0 +1,29 @@
/*
* This file is part of the Micro Python project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/mpstate.h"
mp_state_ctx_t mp_state_ctx;

155
py/mpstate.h

@ -0,0 +1,155 @@
/*
* This file is part of the Micro Python project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef __MICROPY_INCLUDED_PY_MPSTATE_H__
#define __MICROPY_INCLUDED_PY_MPSTATE_H__
#include <stdint.h>
#include "py/mpconfig.h"
#include "py/misc.h"
#include "py/nlr.h"
#include "py/obj.h"
#include "py/objexcept.h"
// This file contains structures defining the state of the Micro Python
// memory system, runtime and virtual machine. The state is a global
// variable, but in the future it is hoped that the state can become local.
// This structure holds information about the memory allocation system.
typedef struct _mp_state_mem_t {
#if MICROPY_MEM_STATS
size_t total_bytes_allocated;
size_t current_bytes_allocated;
size_t peak_bytes_allocated;
#endif
byte *gc_alloc_table_start;
mp_uint_t gc_alloc_table_byte_len;
#if MICROPY_ENABLE_FINALISER
byte *gc_finaliser_table_start;
#endif
mp_uint_t *gc_pool_start;
mp_uint_t *gc_pool_end;
int gc_stack_overflow;
mp_uint_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
mp_uint_t *gc_sp;
uint16_t gc_lock_depth;
// This variable controls auto garbage collection. If set to 0 then the
// GC won't automatically run when gc_alloc can't find enough blocks. But
// you can still allocate/free memory and also explicitly call gc_collect.
uint16_t gc_auto_collect_enabled;
mp_uint_t gc_last_free_atb_index;
} mp_state_mem_t;
// This structure holds runtime and VM information. It includes a section
// which contains root pointers that must be scanned by the GC.
typedef struct _mp_state_vm_t {
////////////////////////////////////////////////////////////
// START ROOT POINTER SECTION
// everything that needs GC scanning must go here
// this must start at the start of this structure
//
// Note: nlr asm code has the offset of this hard-coded
// top of the linked list of active nlr (non-local return) buffers;
// NULL when no nlr handler is active
nlr_buf_t *nlr_top;
// head of the linked list of qstr pools (newest pool first, via ->prev)
qstr_pool_t *last_pool;
// non-heap memory for creating an exception if we can't allocate RAM
mp_obj_exception_t mp_emergency_exception_obj;
// memory for exception arguments if we can't allocate RAM
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
#if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
// statically allocated buf
byte mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE];
#else
// dynamically allocated buf
byte *mp_emergency_exception_buf;
#endif
#endif
// map with loaded modules
// TODO: expose as sys.modules
mp_map_t mp_loaded_modules_map;
// pending exception object (MP_OBJ_NULL if not pending);
// checked by the VM between bytecodes and raised there
mp_obj_t mp_pending_exception;
// dictionary for the __main__ module
mp_obj_dict_t dict_main;
// dictionary for overridden builtins
#if MICROPY_CAN_OVERRIDE_BUILTINS
mp_obj_dict_t *mp_module_builtins_override_dict;
#endif
// include any root pointers defined by a port
MICROPY_PORT_ROOT_POINTERS
//
// END ROOT POINTER SECTION
////////////////////////////////////////////////////////////
// Stack top at the start of program
// Note: this entry is used to locate the end of the root pointer section.
char *stack_top;
#if MICROPY_STACK_CHECK
// maximum stack usage in bytes; mp_stack_check() raises RuntimeError
// ("maximum recursion depth exceeded") when usage reaches this value
mp_uint_t stack_limit;
#endif
// current optimisation level (set e.g. from the -O command-line option)
mp_uint_t mp_optimise_value;
// size of the emergency exception buf, if it's dynamically allocated
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
mp_int_t mp_emergency_exception_buf_size;
#endif
} mp_state_vm_t;
// This structure combines the above 2 structures, and adds the local
// and global dicts.
// Note: if this structure changes then revisit all nlr asm code since they
// have the offset of nlr_top hard-coded.
typedef struct _mp_state_ctx_t {
// these must come first for root pointer scanning in GC to work
// current local/global namespaces; they can point to the same dict
// when executing at outer module scope
mp_obj_dict_t *dict_locals;
mp_obj_dict_t *dict_globals;
// this must come next for root pointer scanning in GC to work
// (nlr asm code hard-codes nlr_top's offset as 2 pointers from the
// start of mp_state_ctx, i.e. immediately after the 2 dicts above)
mp_state_vm_t vm;
mp_state_mem_t mem;
} mp_state_ctx_t;
// the single global state instance (defined in py/mpstate.c)
extern mp_state_ctx_t mp_state_ctx;
// accessor macros for the three sections of the global state
#define MP_STATE_CTX(x) (mp_state_ctx.x)
#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
#endif // __MICROPY_INCLUDED_PY_MPSTATE_H__

8
py/nlr.h

@ -64,14 +64,14 @@ struct _nlr_buf_t {
#endif
};
extern nlr_buf_t *nlr_top;
#if MICROPY_NLR_SETJMP
#include "py/mpstate.h"
NORETURN void nlr_setjmp_jump(void *val);
// nlr_push() must be defined as a macro, because "The stack context will be
// invalidated if the function which called setjmp() returns."
#define nlr_push(buf) ((buf)->prev = nlr_top, nlr_top = (buf), setjmp((buf)->jmpbuf))
#define nlr_pop() { nlr_top = nlr_top->prev; }
#define nlr_push(buf) ((buf)->prev = MP_STATE_VM(nlr_top), MP_STATE_VM(nlr_top) = (buf), setjmp((buf)->jmpbuf))
#define nlr_pop() { MP_STATE_VM(nlr_top) = MP_STATE_VM(nlr_top)->prev; }
#define nlr_jump(val) nlr_setjmp_jump(val)
#else
unsigned int nlr_push(nlr_buf_t *);

7
py/nlrsetjmp.c

@ -26,14 +26,11 @@
#include "py/nlr.h"
// this global variable is used for all nlr implementations
nlr_buf_t *nlr_top;
#if MICROPY_NLR_SETJMP
void nlr_setjmp_jump(void *val) {
nlr_buf_t *buf = nlr_top;
nlr_top = buf->prev;
nlr_buf_t *buf = MP_STATE_VM(nlr_top);
MP_STATE_VM(nlr_top) = buf->prev;
buf->ret_val = val;
longjmp(buf->jmpbuf, 1);
}

5
py/nlrthumb.S

@ -32,6 +32,9 @@
// For reference, arm/thumb callee save regs are:
// r4-r11, r13=sp
// the offset of nlr_top within mp_state_ctx_t
#define NLR_TOP_OFFSET (2 * 4)
.syntax unified
/*.cpu cortex-m4*/
/*.thumb*/
@ -68,7 +71,7 @@ nlr_push:
bx lr @ return
.align 2
nlr_top_addr:
.word nlr_top
.word mp_state_ctx + NLR_TOP_OFFSET
.size nlr_push, .-nlr_push
/**************************************/

29
py/nlrx64.S

@ -32,6 +32,11 @@
// For reference, x86-64 callee save regs are:
// rbx, rbp, rsp, r12, r13, r14, r15
// the offset of nlr_top within mp_state_ctx_t
#define NLR_TOP_OFFSET (2 * 8)
#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
.file "nlr.s"
.text
@ -64,9 +69,9 @@ _nlr_push:
movq %r13, 56(%rdi) # store %r13 into nlr_buf
movq %r14, 64(%rdi) # store %r14 into nlr_buf
movq %r15, 72(%rdi) # store %r15 into nlr_buf
movq nlr_top(%rip), %rax # get last nlr_buf
movq NLR_TOP(%rip), %rax # get last nlr_buf
movq %rax, (%rdi) # store it
movq %rdi, nlr_top(%rip) # stor new nlr_buf (to make linked list)
movq %rdi, NLR_TOP(%rip) # stor new nlr_buf (to make linked list)
xorq %rax, %rax # return 0, normal return
ret # return
#if !(defined(__APPLE__) && defined(__MACH__))
@ -84,9 +89,9 @@ nlr_pop:
.globl _nlr_pop
_nlr_pop:
#endif
movq nlr_top(%rip), %rax # get nlr_top into %rax
movq NLR_TOP(%rip), %rax # get nlr_top into %rax
movq (%rax), %rax # load prev nlr_buf
movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
ret # return
#if !(defined(__APPLE__) && defined(__MACH__))
.size nlr_pop, .-nlr_pop
@ -104,12 +109,12 @@ nlr_jump:
_nlr_jump:
#endif
movq %rdi, %rax # put return value in %rax
movq nlr_top(%rip), %rdi # get nlr_top into %rdi
movq NLR_TOP(%rip), %rdi # get nlr_top into %rdi
test %rdi, %rdi # check for nlr_top being NULL
je .fail # fail if nlr_top is NULL
movq %rax, 8(%rdi) # store return value
movq (%rdi), %rax # load prev nlr_buf
movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
movq 72(%rdi), %r15 # load saved %r15
movq 64(%rdi), %r14 # load saved %r14
movq 56(%rdi), %r13 # load saved %r13
@ -155,9 +160,9 @@ nlr_push:
movq %r15, 72(%rcx) # store %r15 into
movq %rdi, 80(%rcx) # store %rdr into
movq %rsi, 88(%rcx) # store %rsi into
movq nlr_top(%rip), %rax # get last nlr_buf
movq NLR_TOP(%rip), %rax # get last nlr_buf
movq %rax, (%rcx) # store it
movq %rcx, nlr_top(%rip) # stor new nlr_buf (to make linked list)
movq %rcx, NLR_TOP(%rip) # stor new nlr_buf (to make linked list)
xorq %rax, %rax # return 0, normal return
ret # return
@ -166,9 +171,9 @@ nlr_push:
.globl nlr_pop
nlr_pop:
movq nlr_top(%rip), %rax # get nlr_top into %rax
movq NLR_TOP(%rip), %rax # get nlr_top into %rax
movq (%rax), %rax # load prev nlr_buf
movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
ret # return
/**************************************/
@ -177,12 +182,12 @@ nlr_pop:
.globl nlr_jump
nlr_jump:
movq %rcx, %rax # put return value in %rax
movq nlr_top(%rip), %rcx # get nlr_top into %rcx
movq NLR_TOP(%rip), %rcx # get nlr_top into %rcx
test %rcx, %rcx # check for nlr_top being NULL
je .fail # fail if nlr_top is NULL
movq %rax, 8(%rcx) # store return value
movq (%rcx), %rax # load prev nlr_buf
movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
movq 72(%rcx), %r15 # load saved %r15
movq 64(%rcx), %r14 # load saved %r14
movq 56(%rcx), %r13 # load saved %r13

7
py/nlrx86.S

@ -32,10 +32,13 @@
// For reference, x86 callee save regs are:
// ebx, esi, edi, ebp, esp, eip
// the offset of nlr_top within mp_state_ctx_t
#define NLR_TOP_OFFSET (2 * 4)
#ifdef _WIN32
#define NLR_TOP _nlr_top
#define NLR_TOP (_mp_state_ctx + NLR_TOP_OFFSET)
#else
#define NLR_TOP nlr_top
#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
#endif
.file "nlr.s"

11
py/nlrxtensa.S

@ -34,11 +34,16 @@
a3-a7 = rest of args
*/
// the offset of nlr_top within mp_state_ctx_t
#define NLR_TOP_OFFSET (2 * 4)
#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
.file "nlr.s"
.text
.literal_position
.literal .LC0, nlr_top
.literal .LC0, NLR_TOP
.align 4
.global nlr_push
.type nlr_push, @function
@ -64,7 +69,7 @@ nlr_push:
.size nlr_push, .-nlr_push
.literal_position
.literal .LC1, nlr_top
.literal .LC1, NLR_TOP
.align 4
.global nlr_pop
.type nlr_pop, @function
@ -77,7 +82,7 @@ nlr_pop:
.size nlr_pop, .-nlr_pop
.literal_position
.literal .LC2, nlr_top
.literal .LC2, NLR_TOP
.align 4
.global nlr_jump
.type nlr_jump, @function

34
py/objexcept.c

@ -29,6 +29,7 @@
#include <assert.h>
#include <stdio.h>
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/objlist.h"
#include "py/objstr.h"
@ -36,23 +37,13 @@
#include "py/objtype.h"
#include "py/gc.h"
typedef struct _mp_obj_exception_t {
mp_obj_base_t base;
mp_obj_t traceback; // a list object, holding (file,line,block) as numbers (not Python objects); a hack for now
mp_obj_tuple_t *args;
} mp_obj_exception_t;
// Instance of MemoryError exception - needed by mp_malloc_fail
const mp_obj_exception_t mp_const_MemoryError_obj = {{&mp_type_MemoryError}, MP_OBJ_NULL, mp_const_empty_tuple};
// Local non-heap memory for allocating an exception when we run out of RAM
STATIC mp_obj_exception_t mp_emergency_exception_obj;
// Optionally allocated buffer for storing the first argument of an exception
// allocated when the heap is locked.
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
# if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
STATIC byte mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE];
#define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
void mp_init_emergency_exception_buf(void) {
@ -62,12 +53,11 @@ void mp_init_emergency_exception_buf(void) {
}
#else
STATIC mp_int_t mp_emergency_exception_buf_size = 0;
STATIC byte *mp_emergency_exception_buf = NULL;
#define mp_emergency_exception_buf_size MP_STATE_VM(mp_emergency_exception_buf_size)
void mp_init_emergency_exception_buf(void) {
mp_emergency_exception_buf_size = 0;
mp_emergency_exception_buf = NULL;
MP_STATE_VM(mp_emergency_exception_buf) = NULL;
}
mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
@ -78,13 +68,13 @@ mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
}
int old_size = mp_emergency_exception_buf_size;
void *old_buf = mp_emergency_exception_buf;
void *old_buf = MP_STATE_VM(mp_emergency_exception_buf);
// Update the 2 variables atomically so that an interrupt can't occur
// between the assignments.
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
mp_emergency_exception_buf_size = size;
mp_emergency_exception_buf = buf;
MP_STATE_VM(mp_emergency_exception_buf) = buf;
MICROPY_END_ATOMIC_SECTION(atomic_state);
if (old_buf != NULL) {
@ -134,7 +124,7 @@ mp_obj_t mp_obj_exception_make_new(mp_obj_t type_in, mp_uint_t n_args, mp_uint_t
mp_obj_exception_t *o = m_new_obj_var_maybe(mp_obj_exception_t, mp_obj_t, 0);
if (o == NULL) {
// Couldn't allocate heap memory; use local data instead.
o = &mp_emergency_exception_obj;
o = &MP_STATE_VM(mp_emergency_exception_obj);
// We can't store any args.
n_args = 0;
o->args = mp_const_empty_tuple;
@ -308,7 +298,7 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
if (o == NULL) {
// Couldn't allocate heap memory; use local data instead.
// Unfortunately, we won't be able to format the string...
o = &mp_emergency_exception_obj;
o = &MP_STATE_VM(mp_emergency_exception_obj);
o->base.type = exc_type;
o->traceback = MP_OBJ_NULL;
o->args = mp_const_empty_tuple;
@ -318,7 +308,7 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
// of length 1, which has a string object and the string data.
if (mp_emergency_exception_buf_size > (sizeof(mp_obj_tuple_t) + sizeof(mp_obj_str_t) + sizeof(mp_obj_t))) {
mp_obj_tuple_t *tuple = (mp_obj_tuple_t *)mp_emergency_exception_buf;
mp_obj_tuple_t *tuple = (mp_obj_tuple_t *)MP_STATE_VM(mp_emergency_exception_buf);
mp_obj_str_t *str = (mp_obj_str_t *)&tuple->items[1];
tuple->base.type = &mp_type_tuple;
@ -326,7 +316,7 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
tuple->items[0] = str;
byte *str_data = (byte *)&str[1];
uint max_len = mp_emergency_exception_buf + mp_emergency_exception_buf_size
uint max_len = MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size
- str_data;
va_list ap;
@ -340,16 +330,16 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
o->args = tuple;
uint offset = &str_data[str->len] - mp_emergency_exception_buf;
uint offset = &str_data[str->len] - MP_STATE_VM(mp_emergency_exception_buf);
offset += sizeof(void *) - 1;
offset &= ~(sizeof(void *) - 1);
if ((mp_emergency_exception_buf_size - offset) > (sizeof(mp_obj_list_t) + sizeof(mp_obj_t) * 3)) {
// We have room to store some traceback.
mp_obj_list_t *list = (mp_obj_list_t *)((byte *)mp_emergency_exception_buf + offset);
mp_obj_list_t *list = (mp_obj_list_t *)((byte *)MP_STATE_VM(mp_emergency_exception_buf) + offset);
list->base.type = &mp_type_list;
list->items = (mp_obj_t)&list[1];
list->alloc = (mp_emergency_exception_buf + mp_emergency_exception_buf_size - (byte *)list->items) / sizeof(list->items[0]);
list->alloc = (MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - (byte *)list->items) / sizeof(list->items[0]);
list->len = 0;
o->traceback = list;

38
py/objexcept.h

@ -0,0 +1,38 @@
/*
* This file is part of the Micro Python project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
#define __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
#include "py/obj.h"
#include "py/objtuple.h"
// Representation of an exception instance; exposed in this header so that
// mpstate.h can embed one directly (the emergency exception object used
// when the heap cannot allocate).
typedef struct _mp_obj_exception_t {
mp_obj_base_t base;
mp_obj_t traceback; // a list object, holding (file,line,block) as numbers (not Python objects); a hack for now
mp_obj_tuple_t *args; // positional args the exception was created with (may be the empty tuple)
} mp_obj_exception_t;
#endif // __MICROPY_INCLUDED_PY_OBJEXCEPT_H__

19
py/objmodule.c

@ -27,13 +27,12 @@
#include <stdlib.h>
#include <assert.h>
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/objmodule.h"
#include "py/runtime.h"
#include "py/builtin.h"
STATIC mp_map_t mp_loaded_modules_map; // TODO: expose as sys.modules
STATIC void module_print(void (*print)(void *env, const char *fmt, ...), void *env, mp_obj_t self_in, mp_print_kind_t kind) {
mp_obj_module_t *self = self_in;
const char *name = qstr_str(self->name);
@ -65,10 +64,10 @@ STATIC bool module_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t value) {
if (dict->map.table_is_fixed_array) {
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (dict == &mp_module_builtins_globals) {
if (mp_module_builtins_override_dict == NULL) {
mp_module_builtins_override_dict = mp_obj_new_dict(1);
if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
MP_STATE_VM(mp_module_builtins_override_dict) = mp_obj_new_dict(1);
}
dict = mp_module_builtins_override_dict;
dict = MP_STATE_VM(mp_module_builtins_override_dict);
} else
#endif
{
@ -96,7 +95,7 @@ const mp_obj_type_t mp_type_module = {
};
mp_obj_t mp_obj_new_module(qstr module_name) {
mp_map_elem_t *el = mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
mp_map_elem_t *el = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
// We could error out if module already exists, but let C extensions
// add new members to existing modules.
if (el->value != MP_OBJ_NULL) {
@ -192,17 +191,17 @@ STATIC const mp_map_elem_t mp_builtin_module_table[] = {
STATIC MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table);
void mp_module_init(void) {
mp_map_init(&mp_loaded_modules_map, 3);
mp_map_init(&MP_STATE_VM(mp_loaded_modules_map), 3);
}
void mp_module_deinit(void) {
mp_map_deinit(&mp_loaded_modules_map);
mp_map_deinit(&MP_STATE_VM(mp_loaded_modules_map));
}
// returns MP_OBJ_NULL if not found
mp_obj_t mp_module_get(qstr module_name) {
// lookup module
mp_map_elem_t *el = mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
mp_map_elem_t *el = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
if (el == NULL) {
// module not found, look for builtin module names
@ -217,5 +216,5 @@ mp_obj_t mp_module_get(qstr module_name) {
}
void mp_module_register(qstr qstr, mp_obj_t module) {
mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
}

1
py/py.mk

@ -12,6 +12,7 @@ CSUPEROPT = -O3
# py object files
PY_O_BASENAME = \
mpstate.o \
nlrx86.o \
nlrx64.o \
nlrthumb.o \

37
py/qstr.c

@ -27,6 +27,7 @@
#include <assert.h>
#include <string.h>
#include "py/mpstate.h"
#include "py/qstr.h"
#include "py/gc.h"
@ -68,14 +69,6 @@ mp_uint_t qstr_compute_hash(const byte *data, mp_uint_t len) {
return hash;
}
typedef struct _qstr_pool_t {
struct _qstr_pool_t *prev;
mp_uint_t total_prev_len;
mp_uint_t alloc;
mp_uint_t len;
const byte *qstrs[];
} qstr_pool_t;
STATIC const qstr_pool_t const_pool = {
NULL, // no previous pool
0, // no previous pool
@ -90,15 +83,13 @@ STATIC const qstr_pool_t const_pool = {
},
};
STATIC qstr_pool_t *last_pool;
void qstr_init(void) {
last_pool = (qstr_pool_t*)&const_pool; // we won't modify the const_pool since it has no allocated room left
MP_STATE_VM(last_pool) = (qstr_pool_t*)&const_pool; // we won't modify the const_pool since it has no allocated room left
}
STATIC const byte *find_qstr(qstr q) {
// search pool for this qstr
for (qstr_pool_t *pool = last_pool; pool != NULL; pool = pool->prev) {
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
if (q >= pool->total_prev_len) {
return pool->qstrs[q - pool->total_prev_len];
}
@ -112,21 +103,21 @@ STATIC qstr qstr_add(const byte *q_ptr) {
DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", Q_GET_HASH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_DATA(q_ptr));
// make sure we have room in the pool for a new qstr
if (last_pool->len >= last_pool->alloc) {
qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, last_pool->alloc * 2);
pool->prev = last_pool;
pool->total_prev_len = last_pool->total_prev_len + last_pool->len;
pool->alloc = last_pool->alloc * 2;
if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
pool->prev = MP_STATE_VM(last_pool);
pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
pool->alloc = MP_STATE_VM(last_pool)->alloc * 2;
pool->len = 0;
last_pool = pool;
DEBUG_printf("QSTR: allocate new pool of size %d\n", last_pool->alloc);
MP_STATE_VM(last_pool) = pool;
DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc);
}
// add the new qstr
last_pool->qstrs[last_pool->len++] = q_ptr;
MP_STATE_VM(last_pool)->qstrs[MP_STATE_VM(last_pool)->len++] = q_ptr;
// return id for the newly-added qstr
return last_pool->total_prev_len + last_pool->len - 1;
return MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len - 1;
}
qstr qstr_find_strn(const char *str, mp_uint_t str_len) {
@ -134,7 +125,7 @@ qstr qstr_find_strn(const char *str, mp_uint_t str_len) {
mp_uint_t str_hash = qstr_compute_hash((const byte*)str, str_len);
// search pools for the data
for (qstr_pool_t *pool = last_pool; pool != NULL; pool = pool->prev) {
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
if (Q_GET_HASH(*q) == str_hash && Q_GET_LENGTH(*q) == str_len && memcmp(Q_GET_DATA(*q), str, str_len) == 0) {
return pool->total_prev_len + (q - pool->qstrs);
@ -215,7 +206,7 @@ void qstr_pool_info(mp_uint_t *n_pool, mp_uint_t *n_qstr, mp_uint_t *n_str_data_
*n_qstr = 0;
*n_str_data_bytes = 0;
*n_total_bytes = 0;
for (qstr_pool_t *pool = last_pool; pool != NULL && pool != &const_pool; pool = pool->prev) {
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &const_pool; pool = pool->prev) {
*n_pool += 1;
*n_qstr += pool->len;
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {

8
py/qstr.h

@ -46,6 +46,14 @@ enum {
typedef mp_uint_t qstr;
// A pool of interned string (qstr) entries.  Pools form a linked list,
// newest first; a qstr id is a global index across all pools.
typedef struct _qstr_pool_t {
struct _qstr_pool_t *prev; // next-older pool (NULL for the first, const pool)
mp_uint_t total_prev_len; // total number of qstrs in all previous pools, i.e. the id of this pool's first entry
mp_uint_t alloc; // capacity of the qstrs[] array
mp_uint_t len; // number of entries currently used in qstrs[]
const byte *qstrs[]; // flexible array of pointers to the raw qstr data
} qstr_pool_t;
#define QSTR_FROM_STR_STATIC(s) (qstr_from_strn((s), strlen(s)))
void qstr_init(void);

69
py/runtime.c

@ -28,6 +28,7 @@
#include <string.h>
#include <assert.h>
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/parsehelper.h"
#include "py/parsenum.h"
@ -52,32 +53,18 @@
#define DEBUG_OP_printf(...) (void)0
#endif
// pending exception object (MP_OBJ_NULL if not pending)
mp_obj_t mp_pending_exception;
// locals and globals need to be pointers because they can be the same in outer module scope
STATIC mp_obj_dict_t *dict_locals;
STATIC mp_obj_dict_t *dict_globals;
// dictionary for the __main__ module
STATIC mp_obj_dict_t dict_main;
const mp_obj_module_t mp_module___main__ = {
.base = { &mp_type_module },
.name = MP_QSTR___main__,
.globals = (mp_obj_dict_t*)&dict_main,
.globals = (mp_obj_dict_t*)&MP_STATE_VM(dict_main),
};
#if MICROPY_CAN_OVERRIDE_BUILTINS
mp_obj_dict_t *mp_module_builtins_override_dict;
#endif
void mp_init(void) {
qstr_init();
mp_stack_ctrl_init();
// no pending exceptions to start with
mp_pending_exception = MP_OBJ_NULL;
MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
mp_init_emergency_exception_buf();
@ -89,21 +76,21 @@ void mp_init(void) {
#endif
// optimization disabled by default
mp_optimise_value = 0;
MP_STATE_VM(mp_optimise_value) = 0;
// init global module stuff
mp_module_init();
// initialise the __main__ module
mp_obj_dict_init(&dict_main, 1);
mp_obj_dict_store(&dict_main, MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
mp_obj_dict_init(&MP_STATE_VM(dict_main), 1);
mp_obj_dict_store(&MP_STATE_VM(dict_main), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
// locals = globals for outer module (see Objects/frameobject.c/PyFrame_New())
dict_locals = dict_globals = &dict_main;
MP_STATE_CTX(dict_locals) = MP_STATE_CTX(dict_globals) = &MP_STATE_VM(dict_main);
#if MICROPY_CAN_OVERRIDE_BUILTINS
// start with no extensions to builtins
mp_module_builtins_override_dict = NULL;
MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
#endif
}
@ -147,8 +134,8 @@ mp_obj_t mp_load_name(qstr qstr) {
// logic: search locals, globals, builtins
DEBUG_OP_printf("load name %s\n", qstr_str(qstr));
// If we're at the outer scope (locals == globals), dispatch to load_global right away
if (dict_locals != dict_globals) {
mp_map_elem_t *elem = mp_map_lookup(&dict_locals->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (MP_STATE_CTX(dict_locals) != MP_STATE_CTX(dict_globals)) {
mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_locals)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@ -159,12 +146,12 @@ mp_obj_t mp_load_name(qstr qstr) {
mp_obj_t mp_load_global(qstr qstr) {
// logic: search globals, builtins
DEBUG_OP_printf("load global %s\n", qstr_str(qstr));
mp_map_elem_t *elem = mp_map_lookup(&dict_globals->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_globals)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem == NULL) {
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (mp_module_builtins_override_dict != NULL) {
if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
// lookup in additional dynamic table of builtins first
elem = mp_map_lookup(&mp_module_builtins_override_dict->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@ -187,9 +174,9 @@ mp_obj_t mp_load_global(qstr qstr) {
mp_obj_t mp_load_build_class(void) {
DEBUG_OP_printf("load_build_class\n");
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (mp_module_builtins_override_dict != NULL) {
if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
// lookup in additional dynamic table of builtins first
mp_map_elem_t *elem = mp_map_lookup(&mp_module_builtins_override_dict->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@ -200,24 +187,24 @@ mp_obj_t mp_load_build_class(void) {
void mp_store_name(qstr qstr, mp_obj_t obj) {
DEBUG_OP_printf("store name %s <- %p\n", qstr_str(qstr), obj);
mp_obj_dict_store(dict_locals, MP_OBJ_NEW_QSTR(qstr), obj);
mp_obj_dict_store(MP_STATE_CTX(dict_locals), MP_OBJ_NEW_QSTR(qstr), obj);
}
void mp_delete_name(qstr qstr) {
DEBUG_OP_printf("delete name %s\n", qstr_str(qstr));
// TODO convert KeyError to NameError if qstr not found
mp_obj_dict_delete(dict_locals, MP_OBJ_NEW_QSTR(qstr));
mp_obj_dict_delete(MP_STATE_CTX(dict_locals), MP_OBJ_NEW_QSTR(qstr));
}
void mp_store_global(qstr qstr, mp_obj_t obj) {
DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qstr), obj);
mp_obj_dict_store(dict_globals, MP_OBJ_NEW_QSTR(qstr), obj);
mp_obj_dict_store(MP_STATE_CTX(dict_globals), MP_OBJ_NEW_QSTR(qstr), obj);
}
void mp_delete_global(qstr qstr) {
DEBUG_OP_printf("delete global %s\n", qstr_str(qstr));
// TODO convert KeyError to NameError if qstr not found
mp_obj_dict_delete(dict_globals, MP_OBJ_NEW_QSTR(qstr));
mp_obj_dict_delete(MP_STATE_CTX(dict_globals), MP_OBJ_NEW_QSTR(qstr));
}
mp_obj_t mp_unary_op(mp_uint_t op, mp_obj_t arg) {
@ -1241,24 +1228,6 @@ void mp_import_all(mp_obj_t module) {
}
}
mp_obj_dict_t *mp_locals_get(void) {
return dict_locals;
}
void mp_locals_set(mp_obj_dict_t *d) {
DEBUG_OP_printf("mp_locals_set(%p)\n", d);
dict_locals = d;
}
mp_obj_dict_t *mp_globals_get(void) {
return dict_globals;
}
void mp_globals_set(mp_obj_dict_t *d) {
DEBUG_OP_printf("mp_globals_set(%p)\n", d);
dict_globals = d;
}
// this is implemented in this file so it can optimise access to locals/globals
mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
// parse the string

10
py/runtime.h

@ -26,6 +26,7 @@
#ifndef __MICROPY_INCLUDED_PY_RUNTIME_H__
#define __MICROPY_INCLUDED_PY_RUNTIME_H__
#include "py/mpstate.h"
#include "py/obj.h"
typedef enum {
@ -64,10 +65,10 @@ void mp_arg_parse_all_kw_array(mp_uint_t n_pos, mp_uint_t n_kw, const mp_obj_t *
NORETURN void mp_arg_error_terse_mismatch(void);
NORETURN void mp_arg_error_unimpl_kw(void);
mp_obj_dict_t *mp_locals_get(void);
void mp_locals_set(mp_obj_dict_t *d);
mp_obj_dict_t *mp_globals_get(void);
void mp_globals_set(mp_obj_dict_t *d);
static inline mp_obj_dict_t *mp_locals_get(void) { return MP_STATE_CTX(dict_locals); }
static inline void mp_locals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_locals) = d; }
static inline mp_obj_dict_t *mp_globals_get(void) { return MP_STATE_CTX(dict_globals); }
static inline void mp_globals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_globals) = d; }
mp_obj_t mp_load_name(qstr qstr);
mp_obj_t mp_load_global(qstr qstr);
@ -120,7 +121,6 @@ mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type);
mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, mp_uint_t n_args_kw, const mp_obj_t *args);
NORETURN void mp_native_raise(mp_obj_t o);
extern mp_obj_t mp_pending_exception;
extern struct _mp_obj_list_t mp_sys_path_obj;
extern struct _mp_obj_list_t mp_sys_argv_obj;
#define mp_sys_path ((mp_obj_t)&mp_sys_path_obj)

14
py/stackctrl.c

@ -24,34 +24,30 @@
* THE SOFTWARE.
*/
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/obj.h"
#include "py/stackctrl.h"
// Stack top at the start of program
char *stack_top;
void mp_stack_ctrl_init(void) {
volatile int stack_dummy;
stack_top = (char*)&stack_dummy;
MP_STATE_VM(stack_top) = (char*)&stack_dummy;
}
mp_uint_t mp_stack_usage(void) {
// Assumes descending stack
volatile int stack_dummy;
return stack_top - (char*)&stack_dummy;
return MP_STATE_VM(stack_top) - (char*)&stack_dummy;
}
#if MICROPY_STACK_CHECK
static mp_uint_t stack_limit = 10240;
void mp_stack_set_limit(mp_uint_t limit) {
stack_limit = limit;
MP_STATE_VM(stack_limit) = limit;
}
void mp_stack_check(void) {
if (mp_stack_usage() >= stack_limit) {
if (mp_stack_usage() >= MP_STATE_VM(stack_limit)) {
nlr_raise(mp_obj_new_exception_msg(&mp_type_RuntimeError, "maximum recursion depth exceeded"));
}
}

7
py/vm.c

@ -29,6 +29,7 @@
#include <string.h>
#include <assert.h>
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/emitglue.h"
#include "py/runtime.h"
@ -991,10 +992,10 @@ yield:
#endif
pending_exception_check:
if (mp_pending_exception != MP_OBJ_NULL) {
if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
MARK_EXC_IP_SELECTIVE();
mp_obj_t obj = mp_pending_exception;
mp_pending_exception = MP_OBJ_NULL;
mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
RAISE(obj);
}

7
stmhal/pendsv.c

@ -27,6 +27,7 @@
#include <stdlib.h>
#include <stm32f4xx_hal.h>
#include "py/mpstate.h"
#include "py/runtime.h"
#include "pendsv.h"
@ -46,10 +47,10 @@ void pendsv_init(void) {
// the given exception object using nlr_jump in the context of the top-level
// thread.
void pendsv_nlr_jump(void *o) {
if (mp_pending_exception == MP_OBJ_NULL) {
mp_pending_exception = o;
if (MP_STATE_VM(mp_pending_exception) == MP_OBJ_NULL) {
MP_STATE_VM(mp_pending_exception) = o;
} else {
mp_pending_exception = MP_OBJ_NULL;
MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
pendsv_object = o;
SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
}

3
teensy/main.c

@ -7,6 +7,7 @@
#include "py/parse.h"
#include "py/lexer.h"
#include "py/runtime.h"
#include "py/stackctrl.h"
#include "py/gc.h"
#include "gccollect.h"
#include "pyexec.h"
@ -255,6 +256,8 @@ int main(void) {
#define SCB_CCR_STKALIGN (1 << 9)
SCB_CCR |= SCB_CCR_STKALIGN;
mp_stack_set_limit(10240);
pinMode(LED_BUILTIN, OUTPUT);
led_init();

15
unix/gccollect.c

@ -26,6 +26,7 @@
#include <stdio.h>
#include "py/mpstate.h"
#include "py/gc.h"
#if MICROPY_ENABLE_GC
@ -127,23 +128,11 @@ void gc_collect(void) {
//gc_dump_info();
gc_collect_start();
// this traces the .bss section
#if defined( __CYGWIN__ )
#define BSS_START __bss_start__
#elif defined( _MSC_VER ) || defined( __MINGW32__ )
#define BSS_START *bss_start
#define _end *bss_end
#else
#define BSS_START __bss_start
#endif
extern char BSS_START, _end;
//printf(".bss: %p-%p\n", &BSS_START, &_end);
gc_collect_root((void**)&BSS_START, ((mp_uint_t)&_end - (mp_uint_t)&BSS_START) / sizeof(mp_uint_t));
regs_t regs;
gc_helper_get_regs(regs);
// GC stack (and regs because we captured them)
void **regs_ptr = (void**)(void*)&regs;
gc_collect_root(regs_ptr, ((mp_uint_t)stack_top - (mp_uint_t)&regs) / sizeof(mp_uint_t));
gc_collect_root(regs_ptr, ((mp_uint_t)MP_STATE_VM(stack_top) - (mp_uint_t)&regs) / sizeof(mp_uint_t));
gc_collect_end();
//printf("-----\n");

15
unix/main.c

@ -35,6 +35,7 @@
#include <sys/types.h>
#include <errno.h>
#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/compile.h"
#include "py/parsehelper.h"
@ -61,12 +62,10 @@ long heap_size = 128*1024 * (sizeof(mp_uint_t) / 4);
#ifndef _WIN32
#include <signal.h>
STATIC mp_obj_t keyboard_interrupt_obj;
STATIC void sighandler(int signum) {
if (signum == SIGINT) {
mp_obj_exception_clear_traceback(keyboard_interrupt_obj);
mp_pending_exception = keyboard_interrupt_obj;
mp_obj_exception_clear_traceback(MP_STATE_VM(keyboard_interrupt_obj));
MP_STATE_VM(mp_pending_exception) = MP_STATE_VM(keyboard_interrupt_obj);
// disable our handler so next we really die
struct sigaction sa;
sa.sa_handler = SIG_DFL;
@ -336,7 +335,7 @@ int main(int argc, char **argv) {
#ifndef _WIN32
// create keyboard interrupt object
keyboard_interrupt_obj = mp_obj_new_exception(&mp_type_KeyboardInterrupt);
MP_STATE_VM(keyboard_interrupt_obj) = mp_obj_new_exception(&mp_type_KeyboardInterrupt);
#endif
char *home = getenv("HOME");
@ -448,10 +447,10 @@ int main(int argc, char **argv) {
mp_verbose_flag++;
} else if (strncmp(argv[a], "-O", 2) == 0) {
if (isdigit(argv[a][2])) {
mp_optimise_value = argv[a][2] & 0xf;
MP_STATE_VM(mp_optimise_value) = argv[a][2] & 0xf;
} else {
mp_optimise_value = 0;
for (char *p = argv[a] + 1; *p && *p == 'O'; p++, mp_optimise_value++);
MP_STATE_VM(mp_optimise_value) = 0;
for (char *p = argv[a] + 1; *p && *p == 'O'; p++, MP_STATE_VM(mp_optimise_value)++);
}
} else {
return usage(argv);

3
unix/mpconfigport.h

@ -153,6 +153,9 @@ extern const struct _mp_obj_fun_builtin_t mp_builtin_open_obj;
{ MP_OBJ_NEW_QSTR(MP_QSTR_input), (mp_obj_t)&mp_builtin_input_obj }, \
{ MP_OBJ_NEW_QSTR(MP_QSTR_open), (mp_obj_t)&mp_builtin_open_obj },
#define MICROPY_PORT_ROOT_POINTERS \
mp_obj_t keyboard_interrupt_obj;
// We need to provide a declaration/definition of alloca()
#ifdef __FreeBSD__
#include <stdlib.h>

Loading…
Cancel
Save