|
|
|
/*
|
|
|
|
* This file is part of the MicroPython project, http://micropython.org/
|
|
|
|
*
|
|
|
|
* The MIT License (MIT)
|
|
|
|
*
|
|
|
|
* Copyright (c) 2014 Damien P. George
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <assert.h>
|
|
|
|
|
|
|
|
#include "py/runtime.h"
|
|
|
|
#include "py/smallint.h"
|
|
|
|
#include "py/nativeglue.h"
|
|
|
|
#include "py/gc.h"
|
|
|
|
|
|
|
|
#if MICROPY_DEBUG_VERBOSE // print debugging info
|
|
|
|
#define DEBUG_printf DEBUG_printf
|
|
|
|
#else // don't print debugging info
|
|
|
|
#define DEBUG_printf(...) (void)0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if MICROPY_EMIT_NATIVE
|
|
|
|
|
|
|
|
// Map a type-annotation qstr (e.g. "int", "ptr8") to the corresponding
// MP_NATIVE_TYPE_* code, or -1 if the qstr is not a native type name.
int mp_native_type_from_qstr(qstr qst) {
    if (qst == MP_QSTR_object) {
        return MP_NATIVE_TYPE_OBJ;
    } else if (qst == MP_QSTR_bool) {
        return MP_NATIVE_TYPE_BOOL;
    } else if (qst == MP_QSTR_int) {
        return MP_NATIVE_TYPE_INT;
    } else if (qst == MP_QSTR_uint) {
        return MP_NATIVE_TYPE_UINT;
    } else if (qst == MP_QSTR_ptr) {
        return MP_NATIVE_TYPE_PTR;
    } else if (qst == MP_QSTR_ptr8) {
        return MP_NATIVE_TYPE_PTR8;
    } else if (qst == MP_QSTR_ptr16) {
        return MP_NATIVE_TYPE_PTR16;
    } else if (qst == MP_QSTR_ptr32) {
        return MP_NATIVE_TYPE_PTR32;
    } else {
        return -1;
    }
}
|
|
|
|
|
|
|
|
// convert a MicroPython object to a valid native value based on type
|
|
|
|
mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type) {
|
|
|
|
DEBUG_printf("mp_native_from_obj(%p, " UINT_FMT ")\n", obj, type);
|
|
|
|
switch (type & 0xf) {
|
|
|
|
case MP_NATIVE_TYPE_OBJ:
|
|
|
|
return (mp_uint_t)obj;
|
|
|
|
case MP_NATIVE_TYPE_BOOL:
|
|
|
|
return mp_obj_is_true(obj);
|
|
|
|
case MP_NATIVE_TYPE_INT:
|
|
|
|
case MP_NATIVE_TYPE_UINT:
|
|
|
|
return mp_obj_get_int_truncated(obj);
|
|
|
|
default: { // cast obj to a pointer
|
|
|
|
mp_buffer_info_t bufinfo;
|
|
|
|
if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_READ)) {
|
|
|
|
return (mp_uint_t)bufinfo.buf;
|
|
|
|
} else {
|
|
|
|
// assume obj is an integer that represents an address
|
|
|
|
return mp_obj_get_int_truncated(obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if MICROPY_EMIT_MACHINE_CODE
|
|
|
|
|
|
|
|
// convert a native value to a MicroPython object based on type
|
|
|
|
mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type) {
|
|
|
|
DEBUG_printf("mp_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type);
|
|
|
|
switch (type & 0xf) {
|
|
|
|
case MP_NATIVE_TYPE_OBJ:
|
|
|
|
return (mp_obj_t)val;
|
|
|
|
case MP_NATIVE_TYPE_BOOL:
|
|
|
|
return mp_obj_new_bool(val);
|
|
|
|
case MP_NATIVE_TYPE_INT:
|
|
|
|
return mp_obj_new_int(val);
|
|
|
|
case MP_NATIVE_TYPE_UINT:
|
|
|
|
return mp_obj_new_int_from_uint(val);
|
py/dynruntime.h: Implement MP_OBJ_NEW_QSTR.
Because mpy_ld.py doesn't know the target object representation, it emits
instances of `MP_OBJ_NEW_QSTR(MP_QSTR_Foo)` as const string objects, rather
than qstrs. However this doesn't work for map keys (e.g. for a locals dict)
because the map has all_keys_are_qstrs flag is set (and also auto-complete
requires the map keys to be qstrs).
Instead, emit them as regular qstrs, and make a functioning MP_OBJ_NEW_QSTR
function available (via `native_to_obj`, also used for e.g. making
integers).
Remove the code from mpy_ld.py to emit qstrs as constant strings, but leave
behind the scaffold to emit constant objects in case we want to do use this
in the future.
Strictly this should be a .mpy sub-version bump, even though the function
table isn't changing, it does lead to a change in behavior for a new .mpy
running against old MicroPython. `mp_native_to_obj` will incorrectly return
the qstr value directly as an `mp_obj_t`, leading to unexpected results.
But given that it's broken at the moment, it seems unlikely that anyone is
relying on this, so it's not work the other downsides of a sub-version bump
(i.e. breaking pure-Python modules that use @native). The opposite case of
running an old .mpy on new MicroPython is unchanged, and remains broken in
exactly the same way.
This work was funded through GitHub Sponsors.
Signed-off-by: Jim Mussared <jim.mussared@gmail.com>
1 year ago
|
|
|
case MP_NATIVE_TYPE_QSTR:
|
|
|
|
return MP_OBJ_NEW_QSTR(val);
|
|
|
|
default: // a pointer
|
|
|
|
// we return just the value of the pointer as an integer
|
|
|
|
return mp_obj_new_int_from_uint(val);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if MICROPY_EMIT_NATIVE && !MICROPY_DYNAMIC_COMPILER
|
|
|
|
|
|
|
|
#if !MICROPY_PY_BUILTINS_SET
|
|
|
|
// Stub for builds without set support: always raises RuntimeError.
mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items) {
    (void)n_args;
    (void)items;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
}
|
|
|
|
|
|
|
|
// Stub for builds without set support: always raises RuntimeError.
void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) {
    (void)self_in;
    (void)item;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if !MICROPY_PY_BUILTINS_SLICE
|
|
|
|
// Stub for builds without slice support: always raises RuntimeError.
mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) {
    (void)ostart;
    (void)ostop;
    (void)ostep;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("slice unsupported"));
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Make new_globals the current globals dict and return the previous one so
// the caller can restore it later.  Returns NULL (and changes nothing) when
// new_globals is NULL or is already the active globals dict.
STATIC mp_obj_dict_t *mp_native_swap_globals(mp_obj_dict_t *new_globals) {
    mp_obj_dict_t *prev = NULL;
    if (new_globals != NULL) {
        prev = mp_globals_get();
        if (prev == new_globals) {
            // Same dict already active: nothing to swap, signal with NULL.
            prev = NULL;
        } else {
            mp_globals_set(new_globals);
        }
    }
    return prev;
}
|
|
|
|
|
|
|
|
// wrapper that accepts n_args and n_kw in one argument
|
|
|
|
// (native emitter can only pass at most 3 arguments to a function)
|
|
|
|
STATIC mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, size_t n_args_kw, const mp_obj_t *args) {
|
|
|
|
return mp_call_function_n_kw(fun_in, n_args_kw & 0xff, (n_args_kw >> 8) & 0xff, args);
|
|
|
|
}
|
|
|
|
|
|
|
|
// wrapper that makes raise obj and raises it
|
|
|
|
// END_FINALLY opcode requires that we don't raise if o==None
|
|
|
|
STATIC void mp_native_raise(mp_obj_t o) {
|
|
|
|
if (o != MP_OBJ_NULL && o != mp_const_none) {
|
|
|
|
nlr_raise(mp_make_raise_obj(o));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// wrapper that handles iterator buffer
|
|
|
|
STATIC mp_obj_t mp_native_getiter(mp_obj_t obj, mp_obj_iter_buf_t *iter) {
|
|
|
|
if (iter == NULL) {
|
|
|
|
return mp_getiter(obj, NULL);
|
|
|
|
} else {
|
|
|
|
obj = mp_getiter(obj, iter);
|
|
|
|
if (obj != MP_OBJ_FROM_PTR(iter)) {
|
|
|
|
// Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
|
|
|
|
iter->base.type = MP_OBJ_NULL;
|
|
|
|
iter->buf[0] = obj;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// wrapper that handles iterator buffer
|
|
|
|
STATIC mp_obj_t mp_native_iternext(mp_obj_iter_buf_t *iter) {
|
|
|
|
mp_obj_t obj;
|
|
|
|
if (iter->base.type == MP_OBJ_NULL) {
|
|
|
|
obj = iter->buf[0];
|
|
|
|
} else {
|
|
|
|
obj = MP_OBJ_FROM_PTR(iter);
|
|
|
|
}
|
|
|
|
return mp_iternext(obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Drive one step of the "yield from" protocol for native code.
// On entry *ret_value holds a value to throw into gen (or MP_OBJ_NULL for a
// plain send of send_value); on exit it holds the yielded or returned value.
// Returns true if gen yielded, false if it completed.
STATIC bool mp_native_yield_from(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *ret_value) {
    mp_vm_return_kind_t ret_kind;
    nlr_buf_t nlr_buf;
    mp_obj_t throw_value = *ret_value;
    if (nlr_push(&nlr_buf) == 0) {
        if (throw_value != MP_OBJ_NULL) {
            // A throw takes precedence: suppress the send value.
            send_value = MP_OBJ_NULL;
        }
        ret_kind = mp_resume(gen, send_value, throw_value, ret_value);
        nlr_pop();
    } else {
        // An exception escaped mp_resume; capture it as the result.
        ret_kind = MP_VM_RETURN_EXCEPTION;
        *ret_value = nlr_buf.ret_val;
    }

    if (ret_kind == MP_VM_RETURN_YIELD) {
        return true;
    } else if (ret_kind == MP_VM_RETURN_NORMAL) {
        // Generator finished normally; MP_OBJ_STOP_ITERATION is the
        // "no value" sentinel, which maps to None.
        if (*ret_value == MP_OBJ_STOP_ITERATION) {
            *ret_value = mp_const_none;
        }
    } else {
        assert(ret_kind == MP_VM_RETURN_EXCEPTION);
        // Re-raise anything that isn't StopIteration; for StopIteration,
        // extract its value as the result of the yield-from.
        if (!mp_obj_exception_match(*ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
            nlr_raise(*ret_value);
        }
        *ret_value = mp_obj_exception_get_value(*ret_value);
    }

    // If a GeneratorExit was thrown in and the generator completed (didn't
    // yield), the GeneratorExit must propagate to the caller.
    if (throw_value != MP_OBJ_NULL && mp_obj_exception_match(throw_value, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
        nlr_raise(mp_make_raise_obj(throw_value));
    }

    return false;
}
|
|
|
|
|
|
|
|
#if !MICROPY_PY_BUILTINS_FLOAT
|
|
|
|
|
|
|
|
// Stub for builds without float support: always raises RuntimeError.
STATIC mp_obj_t mp_obj_new_float_from_f(float f) {
    (void)f;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
|
|
|
|
|
|
|
|
// Stub for builds without float support: always raises RuntimeError.
STATIC mp_obj_t mp_obj_new_float_from_d(double d) {
    (void)d;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
|
|
|
|
|
|
|
|
// Stub for builds without float support: always raises RuntimeError.
STATIC float mp_obj_get_float_to_f(mp_obj_t o) {
    (void)o;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
|
|
|
|
|
|
|
|
// Stub for builds without float support: always raises RuntimeError.
STATIC double mp_obj_get_float_to_d(mp_obj_t o) {
    (void)o;
    mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Table of runtime functions/objects handed to native and viper code so it
// can call back into the MicroPython runtime.  The entry order is ABI: it
// must correspond exactly to the respective enum in nativeglue.h, so entries
// must never be reordered or removed.
const mp_fun_table_t mp_fun_table = {
    mp_const_none,
    mp_const_false,
    mp_const_true,
    mp_native_from_obj,
    mp_native_to_obj,
    mp_native_swap_globals,
    mp_load_name,
    mp_load_global,
    mp_load_build_class,
    mp_load_attr,
    mp_load_method,
    mp_load_super_method,
    mp_store_name,
    mp_store_global,
    mp_store_attr,
    mp_obj_subscr,
    mp_obj_is_true,
    mp_unary_op,
    mp_binary_op,
    mp_obj_new_tuple,
    mp_obj_new_list,
    mp_obj_new_dict,
    mp_obj_new_set,
    mp_obj_set_store,
    mp_obj_list_append,
    mp_obj_dict_store,
    mp_make_function_from_raw_code,
    mp_native_call_function_n_kw,
    mp_call_method_n_kw,
    mp_call_method_n_kw_var,
    mp_native_getiter,
    mp_native_iternext,
    // nlr_push entry: with setjmp-based NLR the tail helper is used instead.
    #if MICROPY_NLR_SETJMP
    nlr_push_tail,
    #else
    nlr_push,
    #endif
    nlr_pop,
    mp_native_raise,
    mp_import_name,
    mp_import_from,
    mp_import_all,
    mp_obj_new_slice,
    mp_unpack_sequence,
    mp_unpack_ex,
    mp_delete_name,
    mp_delete_global,
    mp_obj_new_closure,
    mp_arg_check_num_sig,
    mp_setup_code_state_native,
    mp_small_int_floor_divide,
    mp_small_int_modulo,
    mp_native_yield_from,
    // setjmp entry: NULL when the NLR implementation doesn't use setjmp.
    #if MICROPY_NLR_SETJMP
    setjmp,
    #else
    NULL,
    #endif
    // Additional entries for dynamic runtime, starts at index 50
    memset,
    memmove,
    gc_realloc,
    mp_printf,
    mp_vprintf,
    mp_raise_msg,
    mp_obj_get_type,
    mp_obj_new_str,
    mp_obj_new_bytes,
    mp_obj_new_bytearray_by_ref,
    mp_obj_new_float_from_f,
    mp_obj_new_float_from_d,
    mp_obj_get_float_to_f,
    mp_obj_get_float_to_d,
    mp_get_buffer,
    mp_get_stream_raise,
    &mp_plat_print,
    &mp_type_type,
    &mp_type_str,
    &mp_type_list,
    &mp_type_dict,
    &mp_type_fun_builtin_0,
    &mp_type_fun_builtin_1,
    &mp_type_fun_builtin_2,
    &mp_type_fun_builtin_3,
    &mp_type_fun_builtin_var,
    &mp_stream_read_obj,
    &mp_stream_readinto_obj,
    &mp_stream_unbuffered_readline_obj,
    &mp_stream_write_obj,
};
|
|
|
#elif MICROPY_EMIT_NATIVE && MICROPY_DYNAMIC_COMPILER
|
|
|
|
|
|
|
|
// Placeholder definition when building with the dynamic compiler —
// NOTE(review): presumably only present so references to the mp_fun_table
// symbol still compile/link in this configuration; confirm against
// nativeglue.h.
const int mp_fun_table;
|
|
|
|
|
|
|
|
#endif // MICROPY_EMIT_NATIVE
|