
Implement more thumb branch instructions.

Damien committed 11 years ago, commit 1a6633a74d
Changed files:
1. py/asmthumb.c (40 changes)
2. py/asmthumb.h (19 changes)
3. py/emitinlinethumb.c (2 changes)
4. py/emitnative.c (20 changes)

py/asmthumb.c (40 changes)

@@ -301,29 +301,16 @@ void asm_thumb_b_n(asm_thumb_t *as, int label) {
     }
 }
-#define OP_BEQ_N(byte_offset) (0xd000 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BNE_N(byte_offset) (0xd100 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BCS_N(byte_offset) (0xd200 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BCC_N(byte_offset) (0xd300 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BMI_N(byte_offset) (0xd400 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BPL_N(byte_offset) (0xd500 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BVS_N(byte_offset) (0xd600 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BVC_N(byte_offset) (0xd700 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BHI_N(byte_offset) (0xd800 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BLS_N(byte_offset) (0xd900 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BGE_N(byte_offset) (0xda00 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BLT_N(byte_offset) (0xdb00 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BGT_N(byte_offset) (0xdc00 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BLE_N(byte_offset) (0xdd00 | (((byte_offset) >> 1) & 0x00ff))
-void asm_thumb_bgt_n(asm_thumb_t *as, int label) {
+#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))
+void asm_thumb_bcc_n(asm_thumb_t *as, int cond, int label) {
     int dest = get_label_dest(as, label);
     int rel = dest - as->code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
     if (SIGNED_FIT9(rel)) {
-        asm_thumb_write_op16(as, OP_BGT_N(rel));
+        asm_thumb_write_op16(as, OP_BCC_N(cond, rel));
     } else {
-        printf("asm_thumb_bgt: branch does not fit in 9 bits\n");
+        printf("asm_thumb_bcc_n: branch does not fit in 9 bits\n");
     }
 }
@@ -408,17 +395,10 @@ void asm_thumb_b_label(asm_thumb_t *as, int label) {
 }
 // all these bit arithmetics need coverage testing!
-#define OP_BEQ(byte_offset) (0xd000 | (((byte_offset) >> 1) & 0x00ff))
-#define OP_BEQW_HI(byte_offset) (0xf000 | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
-#define OP_BEQW_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))
-void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label) {
-    assert(rlo < REG_R8);
-    // compare reg with 0
-    asm_thumb_write_op16(as, OP_CMP_RLO_I8(rlo, 0));
+#define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
+#define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))
-    // branch if equal
+void asm_thumb_bcc_label(asm_thumb_t *as, int cond, int label) {
     int dest = get_label_dest(as, label);
     int rel = dest - as->code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
@@ -426,14 +406,14 @@ void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 9 bit relative jump
         if (SIGNED_FIT9(rel)) {
-            asm_thumb_write_op16(as, OP_BEQ(rel));
+            asm_thumb_write_op16(as, OP_BCC_N(cond, rel));
         } else {
             goto large_jump;
         }
     } else {
         // is a forwards jump, so need to assume it's large
         large_jump:
-        asm_thumb_write_op32(as, OP_BEQW_HI(rel), OP_BEQW_LO(rel));
+        asm_thumb_write_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
     }
 }
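Note on the encoding: the fourteen removed macros were all the same 16-bit Thumb conditional branch, differing only in the condition field, so OP_BCC_N simply places the 4-bit condition code in bits 11..8 of 0xd000 and keeps the signed halfword offset in the low byte; the wide form is parameterized the same way, with the condition at bits 9..6 of the first halfword (OP_BCC_W_HI). A minimal standalone check that the parameterized macro reproduces two of the removed opcodes; the main() harness below is illustrative and not part of the commit:

#include <assert.h>
#include <stdio.h>

// Parameterized narrow conditional branch, as added by this commit.
#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))

// Two of the removed per-condition macros, reproduced here only for comparison.
#define OP_BEQ_N(byte_offset) (0xd000 | (((byte_offset) >> 1) & 0x00ff))
#define OP_BGT_N(byte_offset) (0xdc00 | (((byte_offset) >> 1) & 0x00ff))

int main(void) {
    // Condition codes as defined by the new THUMB_CC_* constants in asmthumb.h.
    enum { CC_EQ = 0x0, CC_GT = 0xc };
    // A narrow B<cond> reaches roughly -256..+254 bytes (9-bit signed, halfword-aligned).
    for (int off = -256; off <= 254; off += 2) {
        assert(OP_BCC_N(CC_EQ, off) == OP_BEQ_N(off));
        assert(OP_BCC_N(CC_GT, off) == OP_BGT_N(off));
    }
    printf("OP_BCC_N reproduces the removed per-condition opcodes\n");
    return 0;
}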

py/asmthumb.h (19 changes)

@@ -26,6 +26,21 @@
 #define REG_ARG_3 REG_R2
 #define REG_ARG_4 REG_R3
+#define THUMB_CC_EQ (0x0)
+#define THUMB_CC_NE (0x1)
+#define THUMB_CC_CS (0x2)
+#define THUMB_CC_CC (0x3)
+#define THUMB_CC_MI (0x4)
+#define THUMB_CC_PL (0x5)
+#define THUMB_CC_VS (0x6)
+#define THUMB_CC_VC (0x7)
+#define THUMB_CC_HI (0x8)
+#define THUMB_CC_LS (0x9)
+#define THUMB_CC_GE (0xa)
+#define THUMB_CC_LT (0xb)
+#define THUMB_CC_GT (0xc)
+#define THUMB_CC_LE (0xd)
 typedef struct _asm_thumb_t asm_thumb_t;
 asm_thumb_t *asm_thumb_new(uint max_num_labels);
@@ -50,7 +65,7 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
 void asm_thumb_subs_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src, int i3_src);
 void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8);
 void asm_thumb_b_n(asm_thumb_t *as, int label);
-void asm_thumb_bgt_n(asm_thumb_t *as, int label);
+void asm_thumb_bcc_n(asm_thumb_t *as, int cond, int label);
 void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32_src); // convenience
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
@@ -63,6 +78,6 @@ void asm_thumb_cmp_reg_reg(asm_thumb_t *as, uint rlo_a, uint rlo_b); // convenie
 void asm_thumb_ite_ge(asm_thumb_t *as); // convenience ?
 void asm_thumb_b_label(asm_thumb_t *as, int label); // convenience ?
-void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label); // convenience ?
+void asm_thumb_bcc_label(asm_thumb_t *as, int cc, int label); // convenience: picks narrow or wide branch
 void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp); // convenience ?
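With asm_thumb_bcc_n()/asm_thumb_bcc_label() taking an explicit THUMB_CC_* code, the old asm_thumb_cmp_reg_bz_label() convenience becomes a two-call sequence at its call sites (see the emitnative.c hunk below). A sketch of an equivalent wrapper built from the new API; the wrapper itself is hypothetical, only the two asm_thumb_* calls and their signatures come from this commit:

// Hypothetical convenience wrapper, assuming `as` came from asm_thumb_new() and
// `label` is an already-allocated label; equivalent to the removed asm_thumb_cmp_reg_bz_label().
static void branch_if_zero(asm_thumb_t *as, uint rlo, int label) {
    asm_thumb_cmp_rlo_i8(as, rlo, 0);             // cmp rlo, #0
    asm_thumb_bcc_label(as, THUMB_CC_EQ, label);  // beq label (wide encoding if out of range)
}

Passing a different THUMB_CC_* code gives any other conditional branch, which is what makes per-condition entry points unnecessary.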

py/emitinlinethumb.c (2 changes)

@@ -154,7 +154,7 @@ static void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, int n_args, p
         }
         int label_num = get_arg_label(emit, op, pn_args, 0);
         // TODO check that this succeeded, ie branch was within range
-        asm_thumb_bgt_n(emit->as, label_num);
+        asm_thumb_bcc_n(emit->as, THUMB_CC_GT, label_num);
     // 2 args
     } else if (strcmp(qstr_str(op), "movs") == 0) {
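The inline assembler's "bgt" now routes through the generic asm_thumb_bcc_n() with THUMB_CC_GT, so supporting the remaining condition suffixes becomes a lookup rather than a new assembler function per condition. A purely hypothetical mapping table (not part of this commit) sketching that direction:

// Hypothetical mnemonic-to-condition table for the inline assembler;
// the commit itself only wires up "bgt".
typedef struct { const char *name; int cc; } cond_branch_t;
static const cond_branch_t cond_branches[] = {
    {"beq", THUMB_CC_EQ}, {"bne", THUMB_CC_NE},
    {"bcs", THUMB_CC_CS}, {"bcc", THUMB_CC_CC},
    {"bge", THUMB_CC_GE}, {"blt", THUMB_CC_LT},
    {"bgt", THUMB_CC_GT}, {"ble", THUMB_CC_LE},
};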

py/emitnative.c (20 changes)

@@ -871,7 +871,7 @@ static void emit_native_jump(emit_t *emit, int label) {
     emit_post(emit);
 }
-static void emit_native_pop_jump_if_false(emit_t *emit, int label) {
+static void emit_native_pop_jump_pre_helper(emit_t *emit, int label) {
     vtype_kind_t vtype = peek_vtype(emit);
     if (vtype == VTYPE_BOOL) {
         emit_pre_pop_reg(emit, &vtype, REG_RET);
@@ -882,18 +882,32 @@ static void emit_native_pop_jump_if_false(emit_t *emit, int label) {
         printf("ViperTypeError: expecting a bool or pyobj, got %d\n", vtype);
         assert(0);
     }
+}
+static void emit_native_pop_jump_if_false(emit_t *emit, int label) {
+    emit_native_pop_jump_pre_helper(emit, label);
 #if N_X64
     asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
     asm_x64_jcc_label(emit->as, JCC_JZ, label);
 #elif N_THUMB
-    asm_thumb_cmp_reg_bz_label(emit->as, REG_RET, label);
+    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
+    asm_thumb_bcc_label(emit->as, THUMB_CC_EQ, label);
 #endif
     emit_post(emit);
 }
 static void emit_native_pop_jump_if_true(emit_t *emit, int label) {
-    assert(0);
+    emit_native_pop_jump_pre_helper(emit, label);
+#if N_X64
+    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
+    asm_x64_jcc_label(emit->as, JCC_JNZ, label);
+#elif N_THUMB
+    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
+    asm_thumb_bcc_label(emit->as, THUMB_CC_NE, label);
+#endif
+    emit_post(emit);
 }
 static void emit_native_jump_if_true_or_pop(emit_t *emit, int label) {
     assert(0);
 }
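Both pop-jump variants now share emit_native_pop_jump_pre_helper() and differ only in the branch condition: on Thumb the emitted sequence is cmp REG_RET, #0 followed by beq (jump-if-false) or bne (jump-if-true), and on x64 it is test/jz or test/jnz. A hypothetical further factoring, built only from calls visible in the hunk above and not part of the commit, that would collapse the two into one parameterized helper:

// Hypothetical: one helper parameterized by the branch condition on each backend.
static void emit_native_pop_jump_cond(emit_t *emit, int label, int x64_jcc, int thumb_cc) {
    emit_native_pop_jump_pre_helper(emit, label);
#if N_X64
    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
    asm_x64_jcc_label(emit->as, x64_jcc, label);
#elif N_THUMB
    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
    asm_thumb_bcc_label(emit->as, thumb_cc, label);
#endif
    emit_post(emit);
}
// emit_native_pop_jump_if_false would call it with (JCC_JZ, THUMB_CC_EQ),
// emit_native_pop_jump_if_true with (JCC_JNZ, THUMB_CC_NE).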
