/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2013, 2014 Damien P. George
* Copyright (c) 2014 Paul Sokolovsky
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <string.h>
#include <assert.h>
#include "py/objtuple.h"
#include "py/objfun.h"
#include "py/runtime.h"
#include "py/bc.h"
#include "py/cstack.h"
#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_PRINT (1)
#else // don't print debugging info
#define DEBUG_PRINT (0)
#define DEBUG_printf(...) (void)0
#endif
// Note: the "name" entry in mp_obj_type_t for a function type must be
// MP_QSTR_function because it is used to determine if an object is of generic
// function type.
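// Illustrative note (not part of the original file): generic-function checks
// elsewhere in the core compare the type's name against MP_QSTR_function,
// roughly along the lines of mp_obj_is_fun() in py/obj.h:
//
//     ((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function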
/******************************************************************************/
/* builtin functions */
static mp_obj_t fun_builtin_0_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
(void)args;
assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_0));
mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, 0, 0, false);
return self->fun._0();
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_builtin_0, MP_QSTR_function, MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
call, fun_builtin_0_call
);
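// Illustrative sketch (hypothetical names, not part of this file): a C module
// would normally declare a zero-argument builtin with MP_DEFINE_CONST_FUN_OBJ_0
// from py/obj.h; calling the resulting object from Python then dispatches
// through fun_builtin_0_call above.
//
//     static mp_obj_t example_noop(void) {
//         return mp_const_none;
//     }
//     static MP_DEFINE_CONST_FUN_OBJ_0(example_noop_obj, example_noop);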
static mp_obj_t fun_builtin_1_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_1));
mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, 1, 1, false);
return self->fun._1(args[0]);
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_builtin_1, MP_QSTR_function, MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
call, fun_builtin_1_call
);
static mp_obj_t fun_builtin_2_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_2));
mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, 2, 2, false);
return self->fun._2(args[0], args[1]);
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_builtin_2, MP_QSTR_function, MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
call, fun_builtin_2_call
);
static mp_obj_t fun_builtin_3_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_3));
mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, 3, 3, false);
return self->fun._3(args[0], args[1], args[2]);
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_builtin_3, MP_QSTR_function, MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
call, fun_builtin_3_call
);
static mp_obj_t fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_var));
mp_obj_fun_builtin_var_t *self = MP_OBJ_TO_PTR(self_in);
// check number of arguments
mp_arg_check_num_sig(n_args, n_kw, self->sig);
if (self->sig & 1) {
// function allows keywords
// we create a map directly from the given args array; self->fun.kw does still
// expect args to have both positional and keyword arguments, ordered as:
// arg0 arg1 ... arg<n_args> key0 value0 key1 value1 ... key<n_kw> value<n_kw>
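// For example (hypothetical call, assuming f is such a builtin): f(1, 2, a=3)
// arrives here as n_args = 2, n_kw = 1 and
//     args = { obj(1), obj(2), obj('a'), obj(3) }
// so the fixed map built below views args[2..3] as the single key/value pair.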
mp_map_t kw_args;
mp_map_init_fixed_table(&kw_args, n_kw, args + n_args);
return self->fun.kw(n_args, args, &kw_args);
} else {
// function takes a variable number of arguments, but no keywords
return self->fun.var(n_args, args);
}
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_builtin_var, MP_QSTR_function, MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
call, fun_builtin_var_call
);
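// Illustrative sketch (hypothetical names, not part of this file): a builtin
// that accepts keyword arguments is usually declared with
// MP_DEFINE_CONST_FUN_OBJ_KW from py/obj.h, which marks the function as
// keyword-taking (the low bit of 'sig' tested above):
//
//     static mp_obj_t example_greet(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
//         (void)n_args; (void)pos_args; (void)kw_args;
//         return mp_const_none;
//     }
//     static MP_DEFINE_CONST_FUN_OBJ_KW(example_greet_obj, 0, example_greet);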
/******************************************************************************/
/* byte code functions */
qstr mp_obj_fun_get_name(mp_const_obj_t fun_in) {
const mp_obj_fun_bc_t *fun = MP_OBJ_TO_PTR(fun_in);
const byte *bc = fun->bytecode;
#if MICROPY_EMIT_NATIVE
if (fun->base.type == &mp_type_fun_native || fun->base.type == &mp_type_native_gen_wrap) {
bc = mp_obj_fun_native_get_prelude_ptr(fun);
}
#endif
MP_BC_PRELUDE_SIG_DECODE(bc);
MP_BC_PRELUDE_SIZE_DECODE(bc);
mp_uint_t name = mp_decode_uint_value(bc);
#if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
name = fun->context->constants.qstr_table[name];
#endif
return name;
}
#if MICROPY_CPYTHON_COMPAT
static void fun_bc_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
(void)kind;
mp_obj_fun_bc_t *o = MP_OBJ_TO_PTR(o_in);
mp_printf(print, "<function %q at 0x%p>", mp_obj_fun_get_name(o_in), o);
}
#endif
#if DEBUG_PRINT
static void dump_args(const mp_obj_t *a, size_t sz) {
DEBUG_printf("%p: ", a);
for (size_t i = 0; i < sz; i++) {
DEBUG_printf("%p ", a[i]);
}
DEBUG_printf("\n");
}
#else
#define dump_args(...) (void)0
#endif
// With this macro you can tune the maximum number of function state bytes
// that will be allocated on the stack. Any function that needs more
// than this will try to use the heap, with fallback to stack allocation.
#define VM_MAX_STATE_ON_STACK (sizeof(mp_uint_t) * 11)
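// For example (assuming typical word sizes): with a 4-byte mp_uint_t this is
// 44 bytes, with an 8-byte mp_uint_t it is 88 bytes; fun_bc_call below uses
// alloca() for code states up to this size and tries the heap beyond it.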
#define DECODE_CODESTATE_SIZE(bytecode, n_state_out_var, state_size_out_var) \
{ \
const uint8_t *ip = bytecode; \
size_t n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args; \
MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state_out_var, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args); \
(void)scope_flags; (void)n_pos_args; (void)n_kwonly_args; (void)n_def_args; \
\
/* state size in bytes */ \
state_size_out_var = n_state_out_var * sizeof(mp_obj_t) \
+ n_exc_stack * sizeof(mp_exc_stack_t); \
}
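// Worked example (illustrative numbers only): a function whose prelude encodes
// n_state = 6 and n_exc_stack = 1 needs
//     state_size = 6 * sizeof(mp_obj_t) + 1 * sizeof(mp_exc_stack_t)
// bytes of state, i.e. 24 bytes plus one exception-stack entry on a typical
// 32-bit build.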
#define INIT_CODESTATE(code_state, _fun_bc, _n_state, n_args, n_kw, args) \
code_state->fun_bc = _fun_bc; \
code_state->n_state = _n_state; \
mp_setup_code_state(code_state, n_args, n_kw, args); \
code_state->old_globals = mp_globals_get();
#if MICROPY_STACKLESS
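// Descriptive note (based on how vm.c uses this helper, see the comment below):
// under MICROPY_STACKLESS the VM calls mp_obj_fun_bc_prepare_codestate to set up
// a Python-to-Python call without recursing into fun_bc_call on the C stack; a
// NULL return tells vm.c to raise RuntimeError or fall back to a normal call.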
mp_code_state_t *mp_obj_fun_bc_prepare_codestate(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_cstack_check();
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
size_t n_state, state_size;
DECODE_CODESTATE_SIZE(self->bytecode, n_state, state_size);
mp_code_state_t *code_state;
#if MICROPY_ENABLE_PYSTACK
code_state = mp_pystack_alloc(sizeof(mp_code_state_t) + state_size);
#else
// If we used m_new_obj_var(), then on out-of-memory a MemoryError would be
// raised. But that is not the correct exception for a function call;
// RuntimeError should be raised instead. So we use m_new_obj_var_maybe(),
// return NULL, and vm.c then takes the needed action (either raise
// RuntimeError or fall back to stack allocation).
code_state = m_new_obj_var_maybe(mp_code_state_t, state, byte, state_size);
if (!code_state) {
return NULL;
}
#endif
INIT_CODESTATE(code_state, self, n_state, n_args, n_kw, args);
// execute the byte code with the correct globals context
mp_globals_set(self->context->module.globals);
return code_state;
}
#endif
static mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_cstack_check();
DEBUG_printf("Input n_args: " UINT_FMT ", n_kw: " UINT_FMT "\n", n_args, n_kw);
DEBUG_printf("Input pos args: ");
dump_args(args, n_args);
DEBUG_printf("Input kw args: ");
dump_args(args + n_args, n_kw * 2);
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
size_t n_state, state_size;
DECODE_CODESTATE_SIZE(self->bytecode, n_state, state_size);
// allocate state for locals and stack
mp_code_state_t *code_state = NULL;
#if MICROPY_ENABLE_PYSTACK
code_state = mp_pystack_alloc(offsetof(mp_code_state_t, state) + state_size);
#else
if (state_size > VM_MAX_STATE_ON_STACK) {
code_state = m_new_obj_var_maybe(mp_code_state_t, state, byte, state_size);
#if MICROPY_DEBUG_VM_STACK_OVERFLOW
if (code_state != NULL) {
memset(code_state->state, 0, state_size);
}
#endif
}
if (code_state == NULL) {
code_state = alloca(offsetof(mp_code_state_t, state) + state_size);
#if MICROPY_DEBUG_VM_STACK_OVERFLOW
memset(code_state->state, 0, state_size);
#endif
state_size = 0; // indicate that we allocated using alloca
}
#endif
INIT_CODESTATE(code_state, self, n_state, n_args, n_kw, args);
// execute the byte code with the correct globals context
mp_globals_set(self->context->module.globals);
mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
mp_globals_set(code_state->old_globals);
#if MICROPY_DEBUG_VM_STACK_OVERFLOW
if (vm_return_kind == MP_VM_RETURN_NORMAL) {
if (code_state->sp < code_state->state) {
mp_printf(MICROPY_DEBUG_PRINTER, "VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
assert(0);
}
}
const byte *bytecode_ptr = self->bytecode;
size_t n_state_unused, n_exc_stack_unused, scope_flags_unused;
size_t n_pos_args, n_kwonly_args, n_def_args_unused;
MP_BC_PRELUDE_SIG_DECODE_INTO(bytecode_ptr, n_state_unused, n_exc_stack_unused,
scope_flags_unused, n_pos_args, n_kwonly_args, n_def_args_unused);
// We can't check the case when an exception is returned in state[0]
// and there are no arguments, because in this case our detection slot may have
// been overwritten by the returned exception (which is allowed).
if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && n_pos_args + n_kwonly_args == 0)) {
// Just check to see that we have at least 1 null object left in the state.
bool overflow = true;
for (size_t i = 0; i < n_state - n_pos_args - n_kwonly_args; ++i) {
if (code_state->state[i] == MP_OBJ_NULL) {
overflow = false;
break;
}
}
if (overflow) {
mp_printf(MICROPY_DEBUG_PRINTER, "VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
assert(0);
}
}
#endif
mp_obj_t result;
if (vm_return_kind == MP_VM_RETURN_NORMAL) {
// return value is in *sp
result = *code_state->sp;
} else {
// must be an exception because normal functions can't yield
assert(vm_return_kind == MP_VM_RETURN_EXCEPTION);
// returned exception is in state[0]
result = code_state->state[0];
}
#if MICROPY_ENABLE_PYSTACK
mp_pystack_free(code_state);
#else
// free the state if it was allocated on the heap
if (state_size != 0) {
m_del_var(mp_code_state_t, state, byte, state_size, code_state);
}
#endif
if (vm_return_kind == MP_VM_RETURN_NORMAL) {
return result;
} else { // MP_VM_RETURN_EXCEPTION
nlr_raise(result);
}
}
#if MICROPY_PY_FUNCTION_ATTRS
void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
if (dest[0] != MP_OBJ_NULL) {
// not load attribute
return;
}
if (attr == MP_QSTR___name__) {
dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(self_in));
}
if (attr == MP_QSTR___globals__) {
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
dest[0] = MP_OBJ_FROM_PTR(self->context->module.globals);
}
}
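// At the Python level, enabling MICROPY_PY_FUNCTION_ATTRS makes (for example):
//
//     def f():
//         pass
//     print(f.__name__)      # 'f'
//     print(f.__globals__)   # the globals dict of f's module
//
// work on bytecode functions, via the attribute loads handled above.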
#endif
#if MICROPY_CPYTHON_COMPAT
#define FUN_BC_TYPE_PRINT print, fun_bc_print,
#else
#define FUN_BC_TYPE_PRINT
#endif
#if MICROPY_PY_FUNCTION_ATTRS
#define FUN_BC_TYPE_ATTR attr, mp_obj_fun_bc_attr,
#else
#define FUN_BC_TYPE_ATTR
#endif
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_bc,
MP_QSTR_function,
MP_TYPE_FLAG_BINDS_SELF,
FUN_BC_TYPE_PRINT
FUN_BC_TYPE_ATTR
call, fun_bc_call
);
mp_obj_t mp_obj_new_fun_bc(const mp_obj_t *def_args, const byte *code, const mp_module_context_t *context, struct _mp_raw_code_t *const *child_table) {
size_t n_def_args = 0;
size_t n_extra_args = 0;
mp_obj_tuple_t *def_pos_args = NULL;
mp_obj_t def_kw_args = MP_OBJ_NULL;
if (def_args != NULL && def_args[0] != MP_OBJ_NULL) {
assert(mp_obj_is_type(def_args[0], &mp_type_tuple));
def_pos_args = MP_OBJ_TO_PTR(def_args[0]);
n_def_args = def_pos_args->len;
n_extra_args = def_pos_args->len;
}
if (def_args != NULL && def_args[1] != MP_OBJ_NULL) {
assert(mp_obj_is_type(def_args[1], &mp_type_dict));
def_kw_args = def_args[1];
n_extra_args += 1;
}
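// Allocate the function object with room for the "extra args": the default
// positional arguments (copied from def_pos_args below), followed by the dict
// of keyword defaults, if one was given.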
mp_obj_fun_bc_t *o = mp_obj_malloc_var(mp_obj_fun_bc_t, extra_args, mp_obj_t, n_extra_args, &mp_type_fun_bc);
o->bytecode = code;
o->context = context;
o->child_table = child_table;
if (def_pos_args != NULL) {
memcpy(o->extra_args, def_pos_args->items, n_def_args * sizeof(mp_obj_t));
}
if (def_kw_args != MP_OBJ_NULL) {
o->extra_args[n_def_args] = def_kw_args;
}
return MP_OBJ_FROM_PTR(o);
}
/******************************************************************************/
/* native functions */
#if MICROPY_EMIT_NATIVE
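// Native functions reuse mp_obj_fun_bc_t: the generated machine code is stored
// where the bytecode would otherwise be, and the call below obtains a callable
// entry point for it via mp_obj_fun_native_get_function_start().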
static mp_obj_t fun_native_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_cstack_check();
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
mp_call_fun_t fun = mp_obj_fun_native_get_function_start(self);
return fun(self_in, n_args, n_kw, args);
}
#if MICROPY_CPYTHON_COMPAT
#define FUN_BC_TYPE_PRINT print, fun_bc_print,
#else
#define FUN_BC_TYPE_PRINT
#endif
#if MICROPY_PY_FUNCTION_ATTRS
#define FUN_BC_TYPE_ATTR attr, mp_obj_fun_bc_attr,
#else
#define FUN_BC_TYPE_ATTR
#endif
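// These optional print/attr slots are spliced into the type definition below
// so that, when the corresponding features are enabled, native functions get
// the same printing and attribute behaviour as plain bytecode functions.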
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_native,
MP_QSTR_function,
MP_TYPE_FLAG_BINDS_SELF,
FUN_BC_TYPE_PRINT
FUN_BC_TYPE_ATTR
call, fun_native_call
);
#endif // MICROPY_EMIT_NATIVE
/******************************************************************************/
/* viper functions */
#if MICROPY_EMIT_NATIVE
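// Viper functions also reuse mp_obj_fun_bc_t, and the object arguments are
// forwarded as-is to the generated code: fun_data/bytecode points straight at
// the machine code, which MICROPY_MAKE_POINTER_CALLABLE turns into a callable
// pointer (e.g. setting the Thumb bit on Cortex-M ports).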
static mp_obj_t fun_viper_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_cstack_check();
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
mp_call_fun_t fun = MICROPY_MAKE_POINTER_CALLABLE((void *)self->bytecode);
return fun(self_in, n_args, n_kw, args);
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_viper,
MP_QSTR_function,
MP_TYPE_FLAG_BINDS_SELF,
call, fun_viper_call
);
#endif // MICROPY_EMIT_NATIVE
/******************************************************************************/
/* inline assembler functions */
#if MICROPY_EMIT_INLINE_ASM
typedef mp_uint_t (*inline_asm_fun_0_t)(void);
typedef mp_uint_t (*inline_asm_fun_1_t)(mp_uint_t);
typedef mp_uint_t (*inline_asm_fun_2_t)(mp_uint_t, mp_uint_t);
typedef mp_uint_t (*inline_asm_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t);
typedef mp_uint_t (*inline_asm_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint_t);
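// The inline assembler supports at most 4 arguments (see fun_asm_call below),
// so only these four call signatures are needed; every argument and the return
// value are passed as a machine word (mp_uint_t).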
// convert a MicroPython object to a sensible value for inline asm
static mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) {
// TODO for byte_array, pass pointer to the array
if (mp_obj_is_small_int(obj)) {
return MP_OBJ_SMALL_INT_VALUE(obj);
} else if (obj == mp_const_none) {
return 0;
} else if (obj == mp_const_false) {
return 0;
} else if (obj == mp_const_true) {
return 1;
} else if (mp_obj_is_exact_type(obj, &mp_type_int)) {
return mp_obj_int_get_truncated(obj);
} else if (mp_obj_is_str(obj)) {
// pointer to the string data (note: the data is likely constant/read-only)
size_t l;
return (mp_uint_t)mp_obj_str_get_data(obj, &l);
} else {
const mp_obj_type_t *type = mp_obj_get_type(obj);
#if MICROPY_PY_BUILTINS_FLOAT
if (type == &mp_type_float) {
// convert float to int (could also pass in float registers)
return (mp_int_t)mp_obj_float_get(obj);
}
#endif
if (type == &mp_type_tuple || type == &mp_type_list) {
// pointer to the start of the tuple/list items (the length is not passed; Python code can use len(x) if it needs it)
size_t len;
mp_obj_t *items;
mp_obj_get_array(obj, &len, &items);
return (mp_uint_t)items;
} else {
mp_buffer_info_t bufinfo;
if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_READ)) {
// supports the buffer protocol, return a pointer to the data
return (mp_uint_t)bufinfo.buf;
} else {
// just pass along a pointer to the object
return (mp_uint_t)obj;
}
}
}
}
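// Illustrative example (not part of this file): on a port with the Thumb
// inline assembler enabled, a function such as
//
//     @micropython.asm_thumb
//     def add(r0, r1):
//         add(r0, r0, r1)
//
// is called like any Python function, e.g. add(2, 3).  Each argument is first
// passed through convert_obj_for_inline_asm() above, so small ints arrive as
// their machine-word value and buffer objects arrive as a pointer to their
// data; the raw return value is converted back by mp_native_to_obj() below.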
static mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_obj_fun_asm_t *self = MP_OBJ_TO_PTR(self_in);
mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
const void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data);
mp_uint_t ret;
if (n_args == 0) {
ret = ((inline_asm_fun_0_t)fun)();
} else if (n_args == 1) {
ret = ((inline_asm_fun_1_t)fun)(convert_obj_for_inline_asm(args[0]));
} else if (n_args == 2) {
ret = ((inline_asm_fun_2_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]));
} else if (n_args == 3) {
ret = ((inline_asm_fun_3_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]), convert_obj_for_inline_asm(args[2]));
} else {
// compiler allows at most 4 arguments
assert(n_args == 4);
ret = ((inline_asm_fun_4_t)fun)(
convert_obj_for_inline_asm(args[0]),
convert_obj_for_inline_asm(args[1]),
convert_obj_for_inline_asm(args[2]),
convert_obj_for_inline_asm(args[3])
);
}
return mp_native_to_obj(ret, self->type_sig);
}
MP_DEFINE_CONST_OBJ_TYPE(
mp_type_fun_asm,
MP_QSTR_function,
MP_TYPE_FLAG_BINDS_SELF,
call, fun_asm_call
);
#endif // MICROPY_EMIT_INLINE_ASM