@@ -42,31 +42,31 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
match op {
Opcode ::Iconst | Opcode ::Bconst | Opcode ::Null = > {
let value = ctx . get_constant ( insn ) . unwrap ( ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
lower_constant_u64 ( ctx , rd , value ) ;
}
Opcode ::F32const = > {
let value = f32 ::from_bits ( ctx . get_constant ( insn ) . unwrap ( ) as u32 ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
lower_constant_f32 ( ctx , rd , value ) ;
}
Opcode ::F64const = > {
let value = f64 ::from_bits ( ctx . get_constant ( insn ) . unwrap ( ) ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
lower_constant_f64 ( ctx , rd , value ) ;
}
Opcode ::Iadd = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::Add32 , ALUOp ::Add64 ) ;
ctx . emit ( alu_inst_imm12 ( alu_op , rd , rn , rm ) ) ;
}
Opcode ::Isub = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::Sub32 , ALUOp ::Sub64 ) ;
ctx . emit ( alu_inst_imm12 ( alu_op , rd , rn , rm ) ) ;
@@ -87,9 +87,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} ;
let va = ctx . alloc_tmp ( RegClass ::V128 , I128 ) ;
let vb = ctx . alloc_tmp ( RegClass ::V128 , I128 ) ;
let ra = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rb = input_to _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let ra = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rb = put_input_in _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::MovToVec64 { rd : va , rn : ra } ) ;
ctx . emit ( Inst ::MovToVec64 { rd : vb , rn : rb } ) ;
ctx . emit ( Inst ::VecRRR {
@@ -121,9 +121,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} ;
let va = ctx . alloc_tmp ( RegClass ::V128 , I128 ) ;
let vb = ctx . alloc_tmp ( RegClass ::V128 , I128 ) ;
let ra = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rb = input_to _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let ra = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rb = put_input_in _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::MovToVec64 { rd : va , rn : ra } ) ;
ctx . emit ( Inst ::MovToVec64 { rd : vb , rn : rb } ) ;
ctx . emit ( Inst ::VecRRR {
@@ -142,18 +142,18 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Ineg = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = zero_reg ( ) ;
let rm = input_to _rse_imm12( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rse_imm12( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::Sub32 , ALUOp ::Sub64 ) ;
ctx . emit ( alu_inst_imm12 ( alu_op , rd , rn , rm ) ) ;
}
Opcode ::Imul = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::MAdd32 , ALUOp ::MAdd64 ) ;
ctx . emit ( Inst ::AluRRRR {
@@ -166,7 +166,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Umulhi | Opcode ::Smulhi = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let is_signed = op = = Opcode ::Smulhi ;
let input_ty = ctx . input_ty ( insn , 0 ) ;
assert ! ( ctx . input_ty ( insn , 1 ) = = input_ty ) ;
@@ -174,8 +174,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
match input_ty {
I64 = > {
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ra = zero_reg ( ) ;
let alu_op = if is_signed {
ALUOp ::SMulH
@@ -196,8 +196,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode ::ZeroExtend64
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let ra = zero_reg ( ) ;
ctx . emit ( Inst ::AluRRRR {
alu_op : ALUOp ::MAdd64 ,
@@ -254,9 +254,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ALUOp ::UDiv64
} ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
// The div instruction does not trap on divide by zero or signed overflow
// so checks are inserted below.
//
@@ -372,8 +372,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// If we reach this point, we weren't able to incorporate the extend as
// a register-mode on another instruction, so we have a 'None'
// narrow-value/extend mode here, and we emit the explicit instruction.
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::Extend {
rd ,
rn ,
@@ -385,15 +385,15 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Bnot = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ty . unwrap ( ) ;
if ty_bits ( ty ) < 128 {
let rm = input_to _rs_immlogic( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rs_immlogic( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::OrrNot32 , ALUOp ::OrrNot64 ) ;
// NOT rd, rm ==> ORR_NOT rd, zero, rm
ctx . emit ( alu_inst_immlogic ( alu_op , rd , zero_reg ( ) , rm ) ) ;
} else {
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::VecMisc {
op : VecMisc2 ::Not ,
rd ,
@@ -409,11 +409,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
| Opcode ::BandNot
| Opcode ::BorNot
| Opcode ::BxorNot = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ty . unwrap ( ) ;
if ty_bits ( ty ) < 128 {
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _rs_immlogic( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rs_immlogic( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let alu_op = match op {
Opcode ::Band = > choose_32_64 ( ty , ALUOp ::And32 , ALUOp ::And64 ) ,
Opcode ::Bor = > choose_32_64 ( ty , ALUOp ::Orr32 , ALUOp ::Orr64 ) ,
@@ -433,9 +433,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ = > unreachable ! ( ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::VecRRR {
alu_op ,
@@ -458,9 +458,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( Opcode ::Sshr , InstSize ::Size32 ) = > NarrowValueMode ::SignExtend32 ,
_ = > unreachable ! ( ) ,
} ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = input_to _reg_immshift( ctx , inputs [ 1 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = put_input_in _reg_immshift( ctx , inputs [ 1 ] ) ;
let alu_op = match op {
Opcode ::Ishl = > choose_32_64 ( ty , ALUOp ::Lsl32 , ALUOp ::Lsl64 ) ,
Opcode ::Ushr = > choose_32_64 ( ty , ALUOp ::Lsr32 , ALUOp ::Lsr64 ) ,
@@ -503,8 +503,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ty . unwrap ( ) ;
let ty_bits_size = ty_bits ( ty ) as u8 ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg(
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg(
ctx ,
inputs [ 0 ] ,
if ty_bits_size < = 32 {
@@ -513,7 +513,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
NarrowValueMode ::ZeroExtend64
} ,
) ;
let rm = input_to _reg_immshift( ctx , inputs [ 1 ] ) ;
let rm = put_input_in _reg_immshift( ctx , inputs [ 1 ] ) ;
if ty_bits_size = = 32 | | ty_bits_size = = 64 {
let alu_op = choose_32_64 ( ty , ALUOp ::RotR32 , ALUOp ::RotR64 ) ;
@@ -652,7 +652,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Bitrev | Opcode ::Clz | Opcode ::Cls | Opcode ::Ctz = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let needs_zext = match op {
Opcode ::Bitrev | Opcode ::Ctz = > false ,
Opcode ::Clz | Opcode ::Cls = > true ,
@@ -666,7 +666,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode ::None
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let op_ty = match ty {
I8 | I16 | I32 = > I32 ,
I64 = > I64 ,
@@ -722,11 +722,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// x += x << 32
// x >> 56
let ty = ty . unwrap ( ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
// FIXME(#1537): zero-extend 8/16/32-bit operands only to 32 bits,
// and fix the sequence below to work properly for this.
let narrow_mode = NarrowValueMode ::ZeroExtend64 ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let tmp = ctx . alloc_tmp ( RegClass ::I64 , I64 ) ;
// If this is a 32-bit Popcnt, use Lsr32 to clear the top 32 bits of the register, then
@@ -903,7 +903,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let is_float = ty_is_float ( elem_ty ) ;
let mem = lower_address ( ctx , elem_ty , & inputs [ . . ] , off ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let memflags = ctx . memflags ( insn ) . expect ( "memory flags" ) ;
let srcloc = if ! memflags . notrap ( ) {
@@ -967,7 +967,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let is_float = ty_is_float ( elem_ty ) ;
let mem = lower_address ( ctx , elem_ty , & inputs [ 1 . . ] , off ) ;
let rd = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let memflags = ctx . memflags ( insn ) . expect ( "memory flags" ) ;
let srcloc = if ! memflags . notrap ( ) {
@@ -997,7 +997,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} = > ( stack_slot , offset ) ,
_ = > unreachable ! ( ) ,
} ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let offset : i32 = offset . into ( ) ;
let inst = ctx
. abi ( )
@@ -1031,7 +1031,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( ALUOp ::SubS32 , NarrowValueMode ::ZeroExtend32 )
} ;
let rcond = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rcond = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
// cmp rcond, #0
ctx . emit ( Inst ::AluRRR {
alu_op : cmp_op ,
@@ -1052,9 +1052,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} ;
// csel.COND rd, rn, rm
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let ty = ctx . output_ty ( insn , 0 ) ;
let bits = ty_bits ( ty ) ;
if ty_is_float ( ty ) & & bits = = 32 {
@@ -1070,10 +1070,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ty . unwrap ( ) ;
if ty_bits ( ty ) < 128 {
let tmp = ctx . alloc_tmp ( RegClass ::I64 , I64 ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rcond = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rn = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rcond = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
// AND rTmp, rn, rcond
ctx . emit ( Inst ::AluRRR {
alu_op : ALUOp ::And64 ,
@@ -1096,10 +1096,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
rm : tmp . to_reg ( ) ,
} ) ;
} else {
let rcond = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rn = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rcond = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::gen_move ( rd , rcond , ty ) ) ;
ctx . emit ( Inst ::VecRRR {
@@ -1120,7 +1120,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// single-def ifcmp.
let ifcmp_insn = maybe_input_insn ( ctx , inputs [ 0 ] , Opcode ::Ifcmp ) . unwrap ( ) ;
lower_icmp_or_ifcmp_to_flags ( ctx , ifcmp_insn , is_signed ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::CSet { rd , cond } ) ;
}
@@ -1129,7 +1129,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let cond = lower_fp_condcode ( condcode ) ;
let ffcmp_insn = maybe_input_insn ( ctx , inputs [ 0 ] , Opcode ::Ffcmp ) . unwrap ( ) ;
lower_fcmp_or_ffcmp_to_flags ( ctx , ffcmp_insn ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::CSet { rd , cond } ) ;
}
@@ -1138,8 +1138,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Copy = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
ctx . emit ( Inst ::gen_move ( rd , rn , ty ) ) ;
}
@@ -1157,16 +1157,16 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// - Ireduce: changing width of an integer. Smaller ints are stored
// with undefined high-order bits, so we can simply do a copy.
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
ctx . emit ( Inst ::gen_move ( rd , rn , ty ) ) ;
}
Opcode ::Bmask = > {
// Bool is {0, 1}, so we can subtract from 0 to get all-1s.
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
ctx . emit ( Inst ::AluRRR {
alu_op : ALUOp ::Sub64 ,
rd ,
@@ -1176,7 +1176,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Bitcast = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ity = ctx . input_ty ( insn , 0 ) ;
let oty = ctx . output_ty ( insn , 0 ) ;
match ( ty_is_float ( ity ) , ty_is_float ( oty ) ) {
@@ -1186,19 +1186,19 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else {
NarrowValueMode ::ZeroExtend64
} ;
let rm = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
ctx . emit ( Inst ::gen_move ( rd , rm , oty ) ) ;
}
( false , false ) = > {
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::gen_move ( rd , rm , oty ) ) ;
}
( false , true ) = > {
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
ctx . emit ( Inst ::MovToVec64 { rd , rn } ) ;
}
( true , false ) = > {
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::MovFromVec {
rd ,
rn ,
@@ -1214,7 +1214,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// N.B.: according to the AArch64 ABI, the top bits of a register
// (above the bits for the value's type) are undefined, so we
// need not extend the return values.
let reg = input_to _reg( ctx , * input , NarrowValueMode ::None ) ;
let reg = put_input_in _reg( ctx , * input , NarrowValueMode ::None ) ;
let retval_reg = ctx . retval ( i ) ;
let ty = ctx . input_ty ( insn , i ) ;
ctx . emit ( Inst ::gen_move ( retval_reg , reg , ty ) ) ;
@@ -1234,7 +1234,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let condcode = inst_condcode ( ctx . data ( insn ) ) . unwrap ( ) ;
let cond = lower_condcode ( condcode ) ;
let is_signed = condcode_is_signed ( condcode ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
let bits = ty_bits ( ty ) ;
let narrow_mode = match ( bits < = 32 , is_signed ) {
@@ -1243,15 +1243,15 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( false , true ) = > NarrowValueMode ::SignExtend64 ,
( false , false ) = > NarrowValueMode ::ZeroExtend64 ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
if ty_bits ( ty ) < 128 {
let alu_op = choose_32_64 ( ty , ALUOp ::SubS32 , ALUOp ::SubS64 ) ;
let rm = input_to _rse_imm12( ctx , inputs [ 1 ] , narrow_mode ) ;
let rm = put_input_in _rse_imm12( ctx , inputs [ 1 ] , narrow_mode ) ;
ctx . emit ( alu_inst_imm12 ( alu_op , writable_zero_reg ( ) , rn , rm ) ) ;
ctx . emit ( Inst ::CondSet { cond , rd } ) ;
} else {
let rm = input_to _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , narrow_mode ) ;
lower_vector_compare ( ctx , rd , rn , rm , ty , cond ) ? ;
}
}
@@ -1260,9 +1260,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let condcode = inst_fp_condcode ( ctx . data ( insn ) ) . unwrap ( ) ;
let cond = lower_fp_condcode ( condcode ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
if ty_bits ( ty ) < 128 {
match ty_bits ( ty ) {
@@ -1344,7 +1344,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::FuncAddr = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ( extname , _ ) = ctx . call_target ( insn ) . unwrap ( ) ;
let extname = extname . clone ( ) ;
let loc = ctx . srcloc ( insn ) ;
@@ -1361,7 +1361,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::SymbolValue = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ( extname , _ , offset ) = ctx . symbol_value ( insn ) . unwrap ( ) ;
let extname = extname . clone ( ) ;
let loc = ctx . srcloc ( insn ) ;
@@ -1388,7 +1388,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
)
}
Opcode ::CallIndirect = > {
let ptr = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let ptr = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::ZeroExtend64 ) ;
let sig = ctx . call_sig ( insn ) . unwrap ( ) ;
assert ! ( inputs . len ( ) - 1 = = sig . params . len ( ) ) ;
assert ! ( outputs . len ( ) = = sig . returns . len ( ) ) ;
@@ -1400,24 +1400,24 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
abi . emit_stack_pre_adjust ( ctx ) ;
assert ! ( inputs . len ( ) = = abi . num_args ( ) ) ;
for ( i , input ) in inputs . iter ( ) . enumerate ( ) {
let arg_reg = input_to _reg( ctx , * input , NarrowValueMode ::None ) ;
let arg_reg = put_input_in _reg( ctx , * input , NarrowValueMode ::None ) ;
abi . emit_copy_reg_to_arg ( ctx , i , arg_reg ) ;
}
abi . emit_call ( ctx ) ;
for ( i , output ) in outputs . iter ( ) . enumerate ( ) {
let retval_reg = output_to _reg ( ctx , * output ) ;
let retval_reg = get_ output_reg( ctx , * output ) ;
abi . emit_copy_retval_to_reg ( ctx , i , retval_reg ) ;
}
abi . emit_stack_post_adjust ( ctx ) ;
}
Opcode ::GetPinnedReg = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::mov ( rd , xreg ( PINNED_REG ) ) ) ;
}
Opcode ::SetPinnedReg = > {
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::mov ( writable_xreg ( PINNED_REG ) , rm ) ) ;
}
@@ -1451,13 +1451,13 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode ::Vconst = > {
let value = output_to_const_f128 ( ctx , outputs [ 0 ] ) . unwrap ( ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
lower_constant_f128 ( ctx , rd , value ) ;
}
Opcode ::RawBitcast = > {
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
ctx . emit ( Inst ::gen_move ( rd , rm , ty ) ) ;
}
@@ -1465,8 +1465,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode ::Extractlane = > {
if let InstructionData ::BinaryImm8 { imm , . . } = ctx . data ( insn ) {
let idx = * imm ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
if ty_is_int ( ty ) {
@@ -1483,8 +1483,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::Splat = > {
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let ty = ctx . input_ty ( insn , 0 ) ;
let inst = if ty_is_int ( ty ) {
Inst ::VecDup { rd , rn , ty }
@@ -1495,8 +1495,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode ::VanyTrue | Opcode ::VallTrue = > {
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rm = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rm = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let tmp = ctx . alloc_tmp ( RegClass ::V128 , ty . unwrap ( ) ) ;
// This operation is implemented by using umaxp or uminv to
@@ -1581,9 +1581,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( Opcode ::Fmax , 64 ) = > FPUOp2 ::Max64 ,
_ = > panic ! ( "Unknown op/bits combination" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::FpuRRR { fpu_op , rd , rn , rm } ) ;
}
@@ -1602,8 +1602,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( Opcode ::Fdemote , 64 ) = > panic ! ( "Cannot demote to 64 bits" ) ,
_ = > panic ! ( "Unknown op/bits combination" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::FpuRR { fpu_op , rd , rn } ) ;
}
@@ -1620,8 +1620,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( Opcode ::Nearest , 64 ) = > FpuRoundMode ::Nearest64 ,
_ = > panic ! ( "Unknown op/bits combination" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::FpuRound { op , rd , rn } ) ;
}
@@ -1632,10 +1632,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
64 = > FPUOp3 ::MAdd64 ,
_ = > panic ! ( "Unknown op size" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ra = input_to _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ra = put_input_in _reg( ctx , inputs [ 2 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::FpuRRRR {
fpu_op ,
rn ,
@@ -1658,9 +1658,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty = ctx . output_ty ( insn , 0 ) ;
let bits = ty_bits ( ty ) as u8 ;
assert ! ( bits = = 32 | | bits = = 64 ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _reg( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let tmp = ctx . alloc_tmp ( RegClass ::V128 , F64 ) ;
// Copy LHS to rd.
@@ -1699,8 +1699,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ = > panic ! ( "Unknown input/output-bits combination" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
// First, check the output: it's important to carry the NaN conversion before the
// in-bounds conversion, per wasm semantics.
@@ -1842,8 +1842,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
( true , 64 ) = > NarrowValueMode ::SignExtend64 ,
_ = > panic ! ( "Unknown input size" ) ,
} ;
let rn = input_to _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , narrow_mode ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
ctx . emit ( Inst ::IntToFpu { op , rd , rn } ) ;
}
@@ -1853,8 +1853,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let out_ty = ctx . output_ty ( insn , 0 ) ;
let out_bits = ty_bits ( out_ty ) ;
let out_signed = op = = Opcode ::FcvtToSintSat ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
// FIMM Vtmp1, u32::MAX or u64::MAX or i32::MAX or i64::MAX
// FMIN Vtmp2, Vin, Vtmp1
@@ -1991,9 +1991,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Now handle the iadd as above, except use an AddS opcode that sets
// flags.
let rd = output_to _reg ( ctx , outputs [ 0 ] ) ;
let rn = input_to _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = input_to _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let rd = get_ output_reg( ctx , outputs [ 0 ] ) ;
let rn = put_input_in _reg( ctx , inputs [ 0 ] , NarrowValueMode ::None ) ;
let rm = put_input_in _rse_imm12( ctx , inputs [ 1 ] , NarrowValueMode ::None ) ;
let ty = ty . unwrap ( ) ;
let alu_op = choose_32_64 ( ty , ALUOp ::AddS32 , ALUOp ::AddS64 ) ;
ctx . emit ( alu_inst_imm12 ( alu_op , rd , rn , rm ) ) ;
@@ -2139,7 +2139,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
kind : CondBrKind ::Cond ( cond ) ,
} ) ;
} else {
let rt = input_to _reg(
let rt = put_input_in _reg(
ctx ,
InsnInput {
insn : branches [ 0 ] ,
@@ -2173,7 +2173,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
( false , true ) = > NarrowValueMode ::SignExtend64 ,
( false , false ) = > NarrowValueMode ::ZeroExtend64 ,
} ;
let rn = input_to _reg(
let rn = put_input_in _reg(
ctx ,
InsnInput {
insn : branches [ 0 ] ,
@@ -2181,7 +2181,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} ,
narrow_mode ,
) ;
let rm = input_to _rse_imm12(
let rm = put_input_in _rse_imm12(
ctx ,
InsnInput {
insn : branches [ 0 ] ,
@@ -2220,7 +2220,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} else {
// If the ifcmp result is actually placed in a
// register, we need to move it back into the flags.
let rn = input_to _reg( ctx , flag_input , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , flag_input , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::MovToNZCV { rn } ) ;
ctx . emit ( Inst ::CondBr {
taken ,
@@ -2248,7 +2248,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} else {
// If the ffcmp result is actually placed in a
// register, we need to move it back into the flags.
let rn = input_to _reg( ctx , flag_input , NarrowValueMode ::None ) ;
let rn = put_input_in _reg( ctx , flag_input , NarrowValueMode ::None ) ;
ctx . emit ( Inst ::MovToNZCV { rn } ) ;
ctx . emit ( Inst ::CondBr {
taken ,
@@ -2294,7 +2294,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
needed_space : 4 * ( 6 + jt_size ) as CodeOffset ,
} ) ;
let ridx = input_to _reg(
let ridx = put_input_in _reg(
ctx ,
InsnInput {
insn : branches [ 0 ] ,