From 79dfac5514c14c62a77995730a9716bee668757e Mon Sep 17 00:00:00 2001 From: Anton Kirilov Date: Thu, 11 Jun 2020 14:20:49 +0100 Subject: [PATCH] Refactor the InstSize enum in the AArch64 backend The main issue with the InstSize enum was that it was used both for GPR and SIMD & FP operands, even though machine instructions do not mix them in general (as in a destination register is either a GPR or not). As a result it had methods such as sf_bit() that made sense only for one type of operand. Another issue was that the enum name was not reflecting its purpose accurately - it was meant to represent an instruction operand size, not an instruction size, which is fixed in A64 (always 4 bytes). Now the enum is split into one for GPR operands and another for scalar SIMD & FP operands. Copyright (c) 2020, Arm Limited. --- .../codegen/src/isa/aarch64/inst/args.rs | 93 ++++-- .../codegen/src/isa/aarch64/inst/emit.rs | 37 +-- .../src/isa/aarch64/inst/emit_tests.rs | 8 +- .../codegen/src/isa/aarch64/inst/imms.rs | 34 +- cranelift/codegen/src/isa/aarch64/inst/mod.rs | 297 +++++++++--------- .../codegen/src/isa/aarch64/inst/regs.rs | 15 +- .../codegen/src/isa/aarch64/lower_inst.rs | 15 +- 7 files changed, 268 insertions(+), 231 deletions(-) diff --git a/cranelift/codegen/src/isa/aarch64/inst/args.rs b/cranelift/codegen/src/isa/aarch64/inst/args.rs index 2b416145bc..6bbd618685 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/args.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/args.rs @@ -403,8 +403,8 @@ impl ShowWithRRU for MemArg { &MemArg::RegScaledExtended(r1, r2, ty, op) => { let shift = shift_for_type(ty); let size = match op { - ExtendOp::SXTW | ExtendOp::UXTW => InstSize::Size32, - _ => InstSize::Size64, + ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32, + _ => OperandSize::Size64, }; let op = op.show_rru(mb_rru); format!( @@ -417,8 +417,8 @@ impl ShowWithRRU for MemArg { } &MemArg::RegExtended(r1, r2, op) => { let size = match op { - ExtendOp::SXTW | ExtendOp::UXTW => InstSize::Size32, - _ => InstSize::Size64, + ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32, + _ => OperandSize::Size64, }; let op = op.show_rru(mb_rru); format!( @@ -492,67 +492,98 @@ impl ShowWithRRU for BranchTarget { } /// Type used to communicate the operand size of a machine instruction, as AArch64 has 32- and -/// 64-bit variants of many instructions (and integer and floating-point registers) and 128-bit -/// variants of vector instructions. -/// TODO: Create a separate type for SIMD & floating-point operands. +/// 64-bit variants of many instructions (and integer registers). #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum InstSize { +pub enum OperandSize { Size32, Size64, - Size128, } -impl InstSize { +impl OperandSize { /// 32-bit case? pub fn is32(self) -> bool { - self == InstSize::Size32 + self == OperandSize::Size32 } /// 64-bit case? pub fn is64(self) -> bool { - self == InstSize::Size64 + self == OperandSize::Size64 } - /// Convert from an `is32` boolean flag to an `InstSize`. - pub fn from_is32(is32: bool) -> InstSize { + /// Convert from an `is32` boolean flag to an `OperandSize`. + pub fn from_is32(is32: bool) -> OperandSize { if is32 { - InstSize::Size32 + OperandSize::Size32 } else { - InstSize::Size64 + OperandSize::Size64 } } /// Convert from a needed width to the smallest size that fits. 
- pub fn from_bits>(bits: I) -> InstSize { + pub fn from_bits>(bits: I) -> OperandSize { let bits: usize = bits.into(); - assert!(bits <= 128); + assert!(bits <= 64); if bits <= 32 { - InstSize::Size32 - } else if bits <= 64 { - InstSize::Size64 + OperandSize::Size32 } else { - InstSize::Size128 + OperandSize::Size64 } } /// Convert from an integer type into the smallest size that fits. - pub fn from_ty(ty: Type) -> InstSize { + pub fn from_ty(ty: Type) -> OperandSize { Self::from_bits(ty_bits(ty)) } /// Convert to I32, I64, or I128. pub fn to_ty(self) -> Type { match self { - InstSize::Size32 => I32, - InstSize::Size64 => I64, - InstSize::Size128 => I128, + OperandSize::Size32 => I32, + OperandSize::Size64 => I64, } } pub fn sf_bit(&self) -> u32 { match self { - InstSize::Size32 => 0, - InstSize::Size64 => 1, - _ => { - panic!("Unexpected size"); - } + OperandSize::Size32 => 0, + OperandSize::Size64 => 1, + } + } +} + +/// Type used to communicate the size of a scalar SIMD & FP operand. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ScalarSize { + Size8, + Size16, + Size32, + Size64, + Size128, +} + +impl ScalarSize { + /// Convert from a needed width to the smallest size that fits. + pub fn from_bits>(bits: I) -> ScalarSize { + match bits.into().next_power_of_two() { + 8 => ScalarSize::Size8, + 16 => ScalarSize::Size16, + 32 => ScalarSize::Size32, + 64 => ScalarSize::Size64, + 128 => ScalarSize::Size128, + _ => panic!("Unexpected type width"), + } + } + + /// Convert from a type into the smallest size that fits. + pub fn from_ty(ty: Type) -> ScalarSize { + Self::from_bits(ty_bits(ty)) + } + + /// Return the encoding bits that are used by some scalar FP instructions + /// for a particular operand size. + pub fn ftype(&self) -> u32 { + match self { + ScalarSize::Size16 => 0b11, + ScalarSize::Size32 => 0b00, + ScalarSize::Size64 => 0b01, + _ => panic!("Unexpected scalar FP operand size"), } } } diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit.rs b/cranelift/codegen/src/isa/aarch64/inst/emit.rs index e872acd18c..79f7624bcc 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit.rs @@ -282,14 +282,13 @@ fn enc_csel(rd: Writable, rn: Reg, rm: Reg, cond: Cond) -> u32 { | (cond.bits() << 12) } -fn enc_fcsel(rd: Writable, rn: Reg, rm: Reg, cond: Cond, size: InstSize) -> u32 { - let ty_bit = if size.is32() { 0 } else { 1 }; +fn enc_fcsel(rd: Writable, rn: Reg, rm: Reg, cond: Cond, size: ScalarSize) -> u32 { 0b000_11110_00_1_00000_0000_11_00000_00000 + | (size.ftype() << 22) | (machreg_to_vec(rm) << 16) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg()) | (cond.bits() << 12) - | (ty_bit << 22) } fn enc_cset(rd: Writable, cond: Cond) -> u32 { @@ -298,7 +297,7 @@ fn enc_cset(rd: Writable, cond: Cond) -> u32 { | (cond.invert().bits() << 12) } -fn enc_ccmp_imm(size: InstSize, rn: Reg, imm: UImm5, nzcv: NZCV, cond: Cond) -> u32 { +fn enc_ccmp_imm(size: OperandSize, rn: Reg, imm: UImm5, nzcv: NZCV, cond: Cond) -> u32 { 0b0_1_1_11010010_00000_0000_10_00000_0_0000 | size.sf_bit() << 31 | imm.bits() << 16 @@ -334,13 +333,11 @@ fn enc_fpurrrr(top17: u32, rd: Writable, rn: Reg, rm: Reg, ra: Reg) -> u32 | machreg_to_vec(rd.to_reg()) } -fn enc_fcmp(size: InstSize, rn: Reg, rm: Reg) -> u32 { - let bits = if size.is32() { - 0b000_11110_00_1_00000_00_1000_00000_00000 - } else { - 0b000_11110_01_1_00000_00_1000_00000_00000 - }; - bits | (machreg_to_vec(rm) << 16) | (machreg_to_vec(rn) << 5) +fn enc_fcmp(size: ScalarSize, rn: Reg, rm: Reg) 
-> u32 { + 0b000_11110_00_1_00000_00_1000_00000_00000 + | (size.ftype() << 22) + | (machreg_to_vec(rm) << 16) + | (machreg_to_vec(rn) << 5) } fn enc_fputoint(top16: u32, rd: Writable, rn: Reg) -> u32 { @@ -613,7 +610,7 @@ impl MachInstEmit for Inst { } &Inst::BitRR { op, rd, rn, .. } => { - let size = if op.inst_size().is32() { 0b0 } else { 0b1 }; + let size = if op.operand_size().is32() { 0b0 } else { 0b1 }; let (op1, op2) = match op { BitOp::RBit32 | BitOp::RBit64 => (0b00000, 0b000000), BitOp::Clz32 | BitOp::Clz64 => (0b00000, 0b000100), @@ -979,10 +976,10 @@ impl MachInstEmit for Inst { &Inst::FpuMove128 { rd, rn } => { sink.put4(enc_vecmov(/* 16b = */ true, rd, rn)); } - &Inst::FpuMoveFromVec { rd, rn, idx, ty } => { - let (imm5, shift, mask) = match ty { - F32 => (0b00100, 3, 0b011), - F64 => (0b01000, 4, 0b001), + &Inst::FpuMoveFromVec { rd, rn, idx, size } => { + let (imm5, shift, mask) = match size { + ScalarSize::Size32 => (0b00100, 3, 0b011), + ScalarSize::Size64 => (0b01000, 4, 0b001), _ => unimplemented!(), }; debug_assert_eq!(idx & mask, idx); @@ -1108,10 +1105,10 @@ impl MachInstEmit for Inst { sink.put4(enc_vec_lanes(q, u, size, opcode, rd, rn)); } &Inst::FpuCmp32 { rn, rm } => { - sink.put4(enc_fcmp(InstSize::Size32, rn, rm)); + sink.put4(enc_fcmp(ScalarSize::Size32, rn, rm)); } &Inst::FpuCmp64 { rn, rm } => { - sink.put4(enc_fcmp(InstSize::Size64, rn, rm)); + sink.put4(enc_fcmp(ScalarSize::Size64, rn, rm)); } &Inst::FpuToInt { op, rd, rn } => { let top16 = match op { @@ -1198,10 +1195,10 @@ impl MachInstEmit for Inst { } } &Inst::FpuCSel32 { rd, rn, rm, cond } => { - sink.put4(enc_fcsel(rd, rn, rm, cond, InstSize::Size32)); + sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size32)); } &Inst::FpuCSel64 { rd, rn, rm, cond } => { - sink.put4(enc_fcsel(rd, rn, rm, cond, InstSize::Size64)); + sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size64)); } &Inst::FpuRound { op, rd, rn } => { let top22 = match op { diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs index 8f25043e6f..d790e390b1 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs @@ -1808,7 +1808,7 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::CCmpImm { - size: InstSize::Size64, + size: OperandSize::Size64, rn: xreg(22), imm: UImm5::maybe_from_u8(5).unwrap(), nzcv: NZCV::new(false, false, true, true), @@ -1819,7 +1819,7 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::CCmpImm { - size: InstSize::Size32, + size: OperandSize::Size32, rn: xreg(3), imm: UImm5::maybe_from_u8(30).unwrap(), nzcv: NZCV::new(true, true, true, true), @@ -3022,7 +3022,7 @@ fn test_aarch64_binemit() { rd: writable_vreg(1), rn: vreg(30), idx: 2, - ty: F32, + size: ScalarSize::Size32, }, "C107145E", "mov s1, v30.s[2]", @@ -3033,7 +3033,7 @@ fn test_aarch64_binemit() { rd: writable_vreg(23), rn: vreg(11), idx: 0, - ty: F64, + size: ScalarSize::Size64, }, "7705085E", "mov d23, v11.d[0]", diff --git a/cranelift/codegen/src/isa/aarch64/inst/imms.rs b/cranelift/codegen/src/isa/aarch64/inst/imms.rs index 961559cc9f..7561d5ff46 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/imms.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/imms.rs @@ -4,7 +4,7 @@ #[allow(dead_code)] use crate::ir::types::*; use crate::ir::Type; -use crate::isa::aarch64::inst::InstSize; +use crate::isa::aarch64::inst::OperandSize; use crate::machinst::*; use regalloc::RealRegUniverse; @@ -340,7 +340,7 @@ pub struct ImmLogic 
{ /// `R` field: rotate amount. pub s: u8, /// Was this constructed for a 32-bit or 64-bit instruction? - pub size: InstSize, + pub size: OperandSize, } impl ImmLogic { @@ -351,7 +351,7 @@ impl ImmLogic { if ty != I64 && ty != I32 { return None; } - let inst_size = InstSize::from_ty(ty); + let operand_size = OperandSize::from_ty(ty); let original_value = value; @@ -532,7 +532,7 @@ impl ImmLogic { n: out_n != 0, r: r as u8, s: s as u8, - size: inst_size, + size: operand_size, }) } @@ -732,7 +732,7 @@ mod test { n: true, r: 0, s: 0, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(1, I64) ); @@ -743,7 +743,7 @@ mod test { n: true, r: 63, s: 0, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(2, I64) ); @@ -758,7 +758,7 @@ mod test { n: true, r: 61, s: 4, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(248, I64) ); @@ -771,7 +771,7 @@ mod test { n: true, r: 57, s: 3, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(1920, I64) ); @@ -782,7 +782,7 @@ mod test { n: true, r: 63, s: 13, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x7ffe, I64) ); @@ -793,7 +793,7 @@ mod test { n: true, r: 48, s: 1, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x30000, I64) ); @@ -804,7 +804,7 @@ mod test { n: true, r: 44, s: 0, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x100000, I64) ); @@ -815,7 +815,7 @@ mod test { n: true, r: 63, s: 62, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(u64::max_value() - 1, I64) ); @@ -826,7 +826,7 @@ mod test { n: false, r: 1, s: 60, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0xaaaaaaaaaaaaaaaa, I64) ); @@ -837,7 +837,7 @@ mod test { n: false, r: 1, s: 49, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x8181818181818181, I64) ); @@ -848,7 +848,7 @@ mod test { n: false, r: 10, s: 43, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0xffc3ffc3ffc3ffc3, I64) ); @@ -859,7 +859,7 @@ mod test { n: false, r: 0, s: 0, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x100000001, I64) ); @@ -870,7 +870,7 @@ mod test { n: false, r: 0, s: 56, - size: InstSize::Size64, + size: OperandSize::Size64, }), ImmLogic::maybe_from_u64(0x1111111111111111, I64) ); diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 3f1f849336..3039fa2f50 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -5,8 +5,8 @@ use crate::binemit::CodeOffset; use crate::ir::types::{ - B1, B16, B16X8, B32, B32X4, B64, B64X2, B8, B8X16, F32, F32X2, F32X4, F64, F64X2, FFLAGS, I128, - I16, I16X4, I16X8, I32, I32X2, I32X4, I64, I64X2, I8, I8X16, I8X8, IFLAGS, + B1, B16, B16X8, B32, B32X4, B64, B64X2, B8, B8X16, F32, F32X2, F32X4, F64, F64X2, FFLAGS, I16, + I16X4, I16X8, I32, I32X2, I32X4, I64, I64X2, I8, I8X16, I8X8, IFLAGS, }; use crate::ir::{ExternalName, Opcode, SourceLoc, TrapCode, Type}; use crate::machinst::*; @@ -287,10 +287,10 @@ pub enum BitOp { impl BitOp { /// What is the opcode's native width? 
- pub fn inst_size(&self) -> InstSize { + pub fn operand_size(&self) -> OperandSize { match self { - BitOp::RBit32 | BitOp::Clz32 | BitOp::Cls32 => InstSize::Size32, - _ => InstSize::Size64, + BitOp::RBit32 | BitOp::Clz32 | BitOp::Cls32 => OperandSize::Size32, + _ => OperandSize::Size64, } } @@ -561,7 +561,7 @@ pub enum Inst { /// A conditional comparison with an immediate. CCmpImm { - size: InstSize, + size: OperandSize, rn: Reg, imm: UImm5, nzcv: NZCV, @@ -586,7 +586,7 @@ pub enum Inst { rd: Writable, rn: Reg, idx: u8, - ty: Type, + size: ScalarSize, }, /// 1-op FPU instruction. @@ -2156,45 +2156,45 @@ fn mem_finalize_for_show(mem: &MemArg, mb_rru: Option<&RealRegUniverse>) -> (Str impl ShowWithRRU for Inst { fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String { - fn op_name_size(alu_op: ALUOp) -> (&'static str, InstSize) { + fn op_name_size(alu_op: ALUOp) -> (&'static str, OperandSize) { match alu_op { - ALUOp::Add32 => ("add", InstSize::Size32), - ALUOp::Add64 => ("add", InstSize::Size64), - ALUOp::Sub32 => ("sub", InstSize::Size32), - ALUOp::Sub64 => ("sub", InstSize::Size64), - ALUOp::Orr32 => ("orr", InstSize::Size32), - ALUOp::Orr64 => ("orr", InstSize::Size64), - ALUOp::And32 => ("and", InstSize::Size32), - ALUOp::And64 => ("and", InstSize::Size64), - ALUOp::Eor32 => ("eor", InstSize::Size32), - ALUOp::Eor64 => ("eor", InstSize::Size64), - ALUOp::AddS32 => ("adds", InstSize::Size32), - ALUOp::AddS64 => ("adds", InstSize::Size64), - ALUOp::SubS32 => ("subs", InstSize::Size32), - ALUOp::SubS64 => ("subs", InstSize::Size64), - ALUOp::SubS64XR => ("subs", InstSize::Size64), - ALUOp::MAdd32 => ("madd", InstSize::Size32), - ALUOp::MAdd64 => ("madd", InstSize::Size64), - ALUOp::MSub32 => ("msub", InstSize::Size32), - ALUOp::MSub64 => ("msub", InstSize::Size64), - ALUOp::SMulH => ("smulh", InstSize::Size64), - ALUOp::UMulH => ("umulh", InstSize::Size64), - ALUOp::SDiv64 => ("sdiv", InstSize::Size64), - ALUOp::UDiv64 => ("udiv", InstSize::Size64), - ALUOp::AndNot32 => ("bic", InstSize::Size32), - ALUOp::AndNot64 => ("bic", InstSize::Size64), - ALUOp::OrrNot32 => ("orn", InstSize::Size32), - ALUOp::OrrNot64 => ("orn", InstSize::Size64), - ALUOp::EorNot32 => ("eon", InstSize::Size32), - ALUOp::EorNot64 => ("eon", InstSize::Size64), - ALUOp::RotR32 => ("ror", InstSize::Size32), - ALUOp::RotR64 => ("ror", InstSize::Size64), - ALUOp::Lsr32 => ("lsr", InstSize::Size32), - ALUOp::Lsr64 => ("lsr", InstSize::Size64), - ALUOp::Asr32 => ("asr", InstSize::Size32), - ALUOp::Asr64 => ("asr", InstSize::Size64), - ALUOp::Lsl32 => ("lsl", InstSize::Size32), - ALUOp::Lsl64 => ("lsl", InstSize::Size64), + ALUOp::Add32 => ("add", OperandSize::Size32), + ALUOp::Add64 => ("add", OperandSize::Size64), + ALUOp::Sub32 => ("sub", OperandSize::Size32), + ALUOp::Sub64 => ("sub", OperandSize::Size64), + ALUOp::Orr32 => ("orr", OperandSize::Size32), + ALUOp::Orr64 => ("orr", OperandSize::Size64), + ALUOp::And32 => ("and", OperandSize::Size32), + ALUOp::And64 => ("and", OperandSize::Size64), + ALUOp::Eor32 => ("eor", OperandSize::Size32), + ALUOp::Eor64 => ("eor", OperandSize::Size64), + ALUOp::AddS32 => ("adds", OperandSize::Size32), + ALUOp::AddS64 => ("adds", OperandSize::Size64), + ALUOp::SubS32 => ("subs", OperandSize::Size32), + ALUOp::SubS64 => ("subs", OperandSize::Size64), + ALUOp::SubS64XR => ("subs", OperandSize::Size64), + ALUOp::MAdd32 => ("madd", OperandSize::Size32), + ALUOp::MAdd64 => ("madd", OperandSize::Size64), + ALUOp::MSub32 => ("msub", OperandSize::Size32), + ALUOp::MSub64 => ("msub", 
OperandSize::Size64), + ALUOp::SMulH => ("smulh", OperandSize::Size64), + ALUOp::UMulH => ("umulh", OperandSize::Size64), + ALUOp::SDiv64 => ("sdiv", OperandSize::Size64), + ALUOp::UDiv64 => ("udiv", OperandSize::Size64), + ALUOp::AndNot32 => ("bic", OperandSize::Size32), + ALUOp::AndNot64 => ("bic", OperandSize::Size64), + ALUOp::OrrNot32 => ("orn", OperandSize::Size32), + ALUOp::OrrNot64 => ("orn", OperandSize::Size64), + ALUOp::EorNot32 => ("eon", OperandSize::Size32), + ALUOp::EorNot64 => ("eon", OperandSize::Size64), + ALUOp::RotR32 => ("ror", OperandSize::Size32), + ALUOp::RotR64 => ("ror", OperandSize::Size64), + ALUOp::Lsr32 => ("lsr", OperandSize::Size32), + ALUOp::Lsr64 => ("lsr", OperandSize::Size64), + ALUOp::Asr32 => ("asr", OperandSize::Size32), + ALUOp::Asr64 => ("asr", OperandSize::Size64), + ALUOp::Lsl32 => ("lsl", OperandSize::Size32), + ALUOp::Lsl64 => ("lsl", OperandSize::Size64), } } @@ -2300,7 +2300,7 @@ impl ShowWithRRU for Inst { format!("{} {}, {}, {}, {}", op, rd, rn, rm, extendop) } &Inst::BitRR { op, rd, rn } => { - let size = op.inst_size(); + let size = op.operand_size(); let op = op.op_str(); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); @@ -2349,20 +2349,20 @@ impl ShowWithRRU for Inst { _ => false, }; let (op, size) = match (self, is_unscaled) { - (&Inst::ULoad8 { .. }, false) => ("ldrb", InstSize::Size32), - (&Inst::ULoad8 { .. }, true) => ("ldurb", InstSize::Size32), - (&Inst::SLoad8 { .. }, false) => ("ldrsb", InstSize::Size64), - (&Inst::SLoad8 { .. }, true) => ("ldursb", InstSize::Size64), - (&Inst::ULoad16 { .. }, false) => ("ldrh", InstSize::Size32), - (&Inst::ULoad16 { .. }, true) => ("ldurh", InstSize::Size32), - (&Inst::SLoad16 { .. }, false) => ("ldrsh", InstSize::Size64), - (&Inst::SLoad16 { .. }, true) => ("ldursh", InstSize::Size64), - (&Inst::ULoad32 { .. }, false) => ("ldr", InstSize::Size32), - (&Inst::ULoad32 { .. }, true) => ("ldur", InstSize::Size32), - (&Inst::SLoad32 { .. }, false) => ("ldrsw", InstSize::Size64), - (&Inst::SLoad32 { .. }, true) => ("ldursw", InstSize::Size64), - (&Inst::ULoad64 { .. }, false) => ("ldr", InstSize::Size64), - (&Inst::ULoad64 { .. }, true) => ("ldur", InstSize::Size64), + (&Inst::ULoad8 { .. }, false) => ("ldrb", OperandSize::Size32), + (&Inst::ULoad8 { .. }, true) => ("ldurb", OperandSize::Size32), + (&Inst::SLoad8 { .. }, false) => ("ldrsb", OperandSize::Size64), + (&Inst::SLoad8 { .. }, true) => ("ldursb", OperandSize::Size64), + (&Inst::ULoad16 { .. }, false) => ("ldrh", OperandSize::Size32), + (&Inst::ULoad16 { .. }, true) => ("ldurh", OperandSize::Size32), + (&Inst::SLoad16 { .. }, false) => ("ldrsh", OperandSize::Size64), + (&Inst::SLoad16 { .. }, true) => ("ldursh", OperandSize::Size64), + (&Inst::ULoad32 { .. }, false) => ("ldr", OperandSize::Size32), + (&Inst::ULoad32 { .. }, true) => ("ldur", OperandSize::Size32), + (&Inst::SLoad32 { .. }, false) => ("ldrsw", OperandSize::Size64), + (&Inst::SLoad32 { .. }, true) => ("ldursw", OperandSize::Size64), + (&Inst::ULoad64 { .. }, false) => ("ldr", OperandSize::Size64), + (&Inst::ULoad64 { .. }, true) => ("ldur", OperandSize::Size64), _ => unreachable!(), }; let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); @@ -2397,14 +2397,14 @@ impl ShowWithRRU for Inst { _ => false, }; let (op, size) = match (self, is_unscaled) { - (&Inst::Store8 { .. }, false) => ("strb", InstSize::Size32), - (&Inst::Store8 { .. }, true) => ("sturb", InstSize::Size32), - (&Inst::Store16 { .. 
}, false) => ("strh", InstSize::Size32), - (&Inst::Store16 { .. }, true) => ("sturh", InstSize::Size32), - (&Inst::Store32 { .. }, false) => ("str", InstSize::Size32), - (&Inst::Store32 { .. }, true) => ("stur", InstSize::Size32), - (&Inst::Store64 { .. }, false) => ("str", InstSize::Size64), - (&Inst::Store64 { .. }, true) => ("stur", InstSize::Size64), + (&Inst::Store8 { .. }, false) => ("strb", OperandSize::Size32), + (&Inst::Store8 { .. }, true) => ("sturb", OperandSize::Size32), + (&Inst::Store16 { .. }, false) => ("strh", OperandSize::Size32), + (&Inst::Store16 { .. }, true) => ("sturh", OperandSize::Size32), + (&Inst::Store32 { .. }, false) => ("str", OperandSize::Size32), + (&Inst::Store32 { .. }, true) => ("stur", OperandSize::Size32), + (&Inst::Store64 { .. }, false) => ("str", OperandSize::Size64), + (&Inst::Store64 { .. }, true) => ("stur", OperandSize::Size64), _ => unreachable!(), }; let rd = show_ireg_sized(rd, mb_rru, size); @@ -2429,8 +2429,8 @@ impl ShowWithRRU for Inst { format!("mov {}, {}", rd, rm) } &Inst::Mov32 { rd, rm } => { - let rd = show_ireg_sized(rd.to_reg(), mb_rru, InstSize::Size32); - let rm = show_ireg_sized(rm, mb_rru, InstSize::Size32); + let rd = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32); + let rm = show_ireg_sized(rm, mb_rru, OperandSize::Size32); format!("mov {}, {}", rd, rm) } &Inst::MovZ { rd, ref imm } => { @@ -2483,21 +2483,26 @@ impl ShowWithRRU for Inst { let rn = rn.show_rru(mb_rru); format!("mov {}.16b, {}.16b", rd, rn) } - &Inst::FpuMoveFromVec { rd, rn, idx, ty } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::from_ty(ty)); - let rn = show_vreg_element(rn, mb_rru, idx, ty); + &Inst::FpuMoveFromVec { rd, rn, idx, size } => { + let vector_type = match size { + ScalarSize::Size32 => F32, + ScalarSize::Size64 => F64, + _ => unimplemented!(), + }; + let rd = show_freg_sized(rd.to_reg(), mb_rru, size); + let rn = show_vreg_element(rn, mb_rru, idx, vector_type); format!("mov {}, {}", rd, rn) } &Inst::FpuRR { fpu_op, rd, rn } => { let (op, sizesrc, sizedest) = match fpu_op { - FPUOp1::Abs32 => ("fabs", InstSize::Size32, InstSize::Size32), - FPUOp1::Abs64 => ("fabs", InstSize::Size64, InstSize::Size64), - FPUOp1::Neg32 => ("fneg", InstSize::Size32, InstSize::Size32), - FPUOp1::Neg64 => ("fneg", InstSize::Size64, InstSize::Size64), - FPUOp1::Sqrt32 => ("fsqrt", InstSize::Size32, InstSize::Size32), - FPUOp1::Sqrt64 => ("fsqrt", InstSize::Size64, InstSize::Size64), - FPUOp1::Cvt32To64 => ("fcvt", InstSize::Size32, InstSize::Size64), - FPUOp1::Cvt64To32 => ("fcvt", InstSize::Size64, InstSize::Size32), + FPUOp1::Abs32 => ("fabs", ScalarSize::Size32, ScalarSize::Size32), + FPUOp1::Abs64 => ("fabs", ScalarSize::Size64, ScalarSize::Size64), + FPUOp1::Neg32 => ("fneg", ScalarSize::Size32, ScalarSize::Size32), + FPUOp1::Neg64 => ("fneg", ScalarSize::Size64, ScalarSize::Size64), + FPUOp1::Sqrt32 => ("fsqrt", ScalarSize::Size32, ScalarSize::Size32), + FPUOp1::Sqrt64 => ("fsqrt", ScalarSize::Size64, ScalarSize::Size64), + FPUOp1::Cvt32To64 => ("fcvt", ScalarSize::Size32, ScalarSize::Size64), + FPUOp1::Cvt64To32 => ("fcvt", ScalarSize::Size64, ScalarSize::Size32), }; let rd = show_freg_sized(rd.to_reg(), mb_rru, sizedest); let rn = show_freg_sized(rn, mb_rru, sizesrc); @@ -2505,18 +2510,18 @@ impl ShowWithRRU for Inst { } &Inst::FpuRRR { fpu_op, rd, rn, rm } => { let (op, size) = match fpu_op { - FPUOp2::Add32 => ("fadd", InstSize::Size32), - FPUOp2::Add64 => ("fadd", InstSize::Size64), - FPUOp2::Sub32 => ("fsub", InstSize::Size32), 
- FPUOp2::Sub64 => ("fsub", InstSize::Size64), - FPUOp2::Mul32 => ("fmul", InstSize::Size32), - FPUOp2::Mul64 => ("fmul", InstSize::Size64), - FPUOp2::Div32 => ("fdiv", InstSize::Size32), - FPUOp2::Div64 => ("fdiv", InstSize::Size64), - FPUOp2::Max32 => ("fmax", InstSize::Size32), - FPUOp2::Max64 => ("fmax", InstSize::Size64), - FPUOp2::Min32 => ("fmin", InstSize::Size32), - FPUOp2::Min64 => ("fmin", InstSize::Size64), + FPUOp2::Add32 => ("fadd", ScalarSize::Size32), + FPUOp2::Add64 => ("fadd", ScalarSize::Size64), + FPUOp2::Sub32 => ("fsub", ScalarSize::Size32), + FPUOp2::Sub64 => ("fsub", ScalarSize::Size64), + FPUOp2::Mul32 => ("fmul", ScalarSize::Size32), + FPUOp2::Mul64 => ("fmul", ScalarSize::Size64), + FPUOp2::Div32 => ("fdiv", ScalarSize::Size32), + FPUOp2::Div64 => ("fdiv", ScalarSize::Size64), + FPUOp2::Max32 => ("fmax", ScalarSize::Size32), + FPUOp2::Max64 => ("fmax", ScalarSize::Size64), + FPUOp2::Min32 => ("fmin", ScalarSize::Size32), + FPUOp2::Min64 => ("fmin", ScalarSize::Size64), }; let rd = show_freg_sized(rd.to_reg(), mb_rru, size); let rn = show_freg_sized(rn, mb_rru, size); @@ -2548,8 +2553,8 @@ impl ShowWithRRU for Inst { ra, } => { let (op, size) = match fpu_op { - FPUOp3::MAdd32 => ("fmadd", InstSize::Size32), - FPUOp3::MAdd64 => ("fmadd", InstSize::Size64), + FPUOp3::MAdd32 => ("fmadd", ScalarSize::Size32), + FPUOp3::MAdd64 => ("fmadd", ScalarSize::Size64), }; let rd = show_freg_sized(rd.to_reg(), mb_rru, size); let rn = show_freg_sized(rn, mb_rru, size); @@ -2558,23 +2563,23 @@ impl ShowWithRRU for Inst { format!("{} {}, {}, {}, {}", op, rd, rn, rm, ra) } &Inst::FpuCmp32 { rn, rm } => { - let rn = show_freg_sized(rn, mb_rru, InstSize::Size32); - let rm = show_freg_sized(rm, mb_rru, InstSize::Size32); + let rn = show_freg_sized(rn, mb_rru, ScalarSize::Size32); + let rm = show_freg_sized(rm, mb_rru, ScalarSize::Size32); format!("fcmp {}, {}", rn, rm) } &Inst::FpuCmp64 { rn, rm } => { - let rn = show_freg_sized(rn, mb_rru, InstSize::Size64); - let rm = show_freg_sized(rm, mb_rru, InstSize::Size64); + let rn = show_freg_sized(rn, mb_rru, ScalarSize::Size64); + let rm = show_freg_sized(rm, mb_rru, ScalarSize::Size64); format!("fcmp {}, {}", rn, rm) } &Inst::FpuLoad32 { rd, ref mem, .. } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size32); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size32); let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru); let mem = mem.show_rru(mb_rru); format!("{}ldr {}, {}", mem_str, rd, mem) } &Inst::FpuLoad64 { rd, ref mem, .. } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size64); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size64); let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru); let mem = mem.show_rru(mb_rru); format!("{}ldr {}, {}", mem_str, rd, mem) @@ -2587,13 +2592,13 @@ impl ShowWithRRU for Inst { format!("{}ldr {}, {}", mem_str, rd, mem) } &Inst::FpuStore32 { rd, ref mem, .. } => { - let rd = show_freg_sized(rd, mb_rru, InstSize::Size32); + let rd = show_freg_sized(rd, mb_rru, ScalarSize::Size32); let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru); let mem = mem.show_rru(mb_rru); format!("{}str {}, {}", mem_str, rd, mem) } &Inst::FpuStore64 { rd, ref mem, .. 
} => { - let rd = show_freg_sized(rd, mb_rru, InstSize::Size64); + let rd = show_freg_sized(rd, mb_rru, ScalarSize::Size64); let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru); let mem = mem.show_rru(mb_rru); format!("{}str {}, {}", mem_str, rd, mem) @@ -2606,27 +2611,27 @@ impl ShowWithRRU for Inst { format!("{}str {}, {}", mem_str, rd, mem) } &Inst::LoadFpuConst32 { rd, const_data } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size32); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size32); format!("ldr {}, pc+8 ; b 8 ; data.f32 {}", rd, const_data) } &Inst::LoadFpuConst64 { rd, const_data } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size64); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size64); format!("ldr {}, pc+8 ; b 12 ; data.f64 {}", rd, const_data) } &Inst::LoadFpuConst128 { rd, const_data } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size128); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size128); format!("ldr {}, pc+8 ; b 20 ; data.f128 0x{:032x}", rd, const_data) } &Inst::FpuToInt { op, rd, rn } => { let (op, sizesrc, sizedest) = match op { - FpuToIntOp::F32ToI32 => ("fcvtzs", InstSize::Size32, InstSize::Size32), - FpuToIntOp::F32ToU32 => ("fcvtzu", InstSize::Size32, InstSize::Size32), - FpuToIntOp::F32ToI64 => ("fcvtzs", InstSize::Size32, InstSize::Size64), - FpuToIntOp::F32ToU64 => ("fcvtzu", InstSize::Size32, InstSize::Size64), - FpuToIntOp::F64ToI32 => ("fcvtzs", InstSize::Size64, InstSize::Size32), - FpuToIntOp::F64ToU32 => ("fcvtzu", InstSize::Size64, InstSize::Size32), - FpuToIntOp::F64ToI64 => ("fcvtzs", InstSize::Size64, InstSize::Size64), - FpuToIntOp::F64ToU64 => ("fcvtzu", InstSize::Size64, InstSize::Size64), + FpuToIntOp::F32ToI32 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size32), + FpuToIntOp::F32ToU32 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size32), + FpuToIntOp::F32ToI64 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size64), + FpuToIntOp::F32ToU64 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size64), + FpuToIntOp::F64ToI32 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size32), + FpuToIntOp::F64ToU32 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size32), + FpuToIntOp::F64ToI64 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size64), + FpuToIntOp::F64ToU64 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size64), }; let rd = show_ireg_sized(rd.to_reg(), mb_rru, sizedest); let rn = show_freg_sized(rn, mb_rru, sizesrc); @@ -2634,43 +2639,43 @@ impl ShowWithRRU for Inst { } &Inst::IntToFpu { op, rd, rn } => { let (op, sizesrc, sizedest) = match op { - IntToFpuOp::I32ToF32 => ("scvtf", InstSize::Size32, InstSize::Size32), - IntToFpuOp::U32ToF32 => ("ucvtf", InstSize::Size32, InstSize::Size32), - IntToFpuOp::I64ToF32 => ("scvtf", InstSize::Size64, InstSize::Size32), - IntToFpuOp::U64ToF32 => ("ucvtf", InstSize::Size64, InstSize::Size32), - IntToFpuOp::I32ToF64 => ("scvtf", InstSize::Size32, InstSize::Size64), - IntToFpuOp::U32ToF64 => ("ucvtf", InstSize::Size32, InstSize::Size64), - IntToFpuOp::I64ToF64 => ("scvtf", InstSize::Size64, InstSize::Size64), - IntToFpuOp::U64ToF64 => ("ucvtf", InstSize::Size64, InstSize::Size64), + IntToFpuOp::I32ToF32 => ("scvtf", OperandSize::Size32, ScalarSize::Size32), + IntToFpuOp::U32ToF32 => ("ucvtf", OperandSize::Size32, ScalarSize::Size32), + IntToFpuOp::I64ToF32 => ("scvtf", OperandSize::Size64, ScalarSize::Size32), + IntToFpuOp::U64ToF32 => ("ucvtf", OperandSize::Size64, ScalarSize::Size32), + IntToFpuOp::I32ToF64 
=> ("scvtf", OperandSize::Size32, ScalarSize::Size64), + IntToFpuOp::U32ToF64 => ("ucvtf", OperandSize::Size32, ScalarSize::Size64), + IntToFpuOp::I64ToF64 => ("scvtf", OperandSize::Size64, ScalarSize::Size64), + IntToFpuOp::U64ToF64 => ("ucvtf", OperandSize::Size64, ScalarSize::Size64), }; let rd = show_freg_sized(rd.to_reg(), mb_rru, sizedest); let rn = show_ireg_sized(rn, mb_rru, sizesrc); format!("{} {}, {}", op, rd, rn) } &Inst::FpuCSel32 { rd, rn, rm, cond } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size32); - let rn = show_freg_sized(rn, mb_rru, InstSize::Size32); - let rm = show_freg_sized(rm, mb_rru, InstSize::Size32); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size32); + let rn = show_freg_sized(rn, mb_rru, ScalarSize::Size32); + let rm = show_freg_sized(rm, mb_rru, ScalarSize::Size32); let cond = cond.show_rru(mb_rru); format!("fcsel {}, {}, {}, {}", rd, rn, rm, cond) } &Inst::FpuCSel64 { rd, rn, rm, cond } => { - let rd = show_freg_sized(rd.to_reg(), mb_rru, InstSize::Size64); - let rn = show_freg_sized(rn, mb_rru, InstSize::Size64); - let rm = show_freg_sized(rm, mb_rru, InstSize::Size64); + let rd = show_freg_sized(rd.to_reg(), mb_rru, ScalarSize::Size64); + let rn = show_freg_sized(rn, mb_rru, ScalarSize::Size64); + let rm = show_freg_sized(rm, mb_rru, ScalarSize::Size64); let cond = cond.show_rru(mb_rru); format!("fcsel {}, {}, {}, {}", rd, rn, rm, cond) } &Inst::FpuRound { op, rd, rn } => { let (inst, size) = match op { - FpuRoundMode::Minus32 => ("frintm", InstSize::Size32), - FpuRoundMode::Minus64 => ("frintm", InstSize::Size64), - FpuRoundMode::Plus32 => ("frintp", InstSize::Size32), - FpuRoundMode::Plus64 => ("frintp", InstSize::Size64), - FpuRoundMode::Zero32 => ("frintz", InstSize::Size32), - FpuRoundMode::Zero64 => ("frintz", InstSize::Size64), - FpuRoundMode::Nearest32 => ("frintn", InstSize::Size32), - FpuRoundMode::Nearest64 => ("frintn", InstSize::Size64), + FpuRoundMode::Minus32 => ("frintm", ScalarSize::Size32), + FpuRoundMode::Minus64 => ("frintm", ScalarSize::Size64), + FpuRoundMode::Plus32 => ("frintp", ScalarSize::Size32), + FpuRoundMode::Plus64 => ("frintp", ScalarSize::Size64), + FpuRoundMode::Zero32 => ("frintz", ScalarSize::Size32), + FpuRoundMode::Zero64 => ("frintz", ScalarSize::Size64), + FpuRoundMode::Nearest32 => ("frintn", ScalarSize::Size32), + FpuRoundMode::Nearest64 => ("frintn", ScalarSize::Size64), }; let rd = show_freg_sized(rd.to_reg(), mb_rru, size); let rn = show_freg_sized(rn, mb_rru, size); @@ -2686,7 +2691,7 @@ impl ShowWithRRU for Inst { I32 | I64 => "mov", _ => "umov", }; - let rd = show_ireg_sized(rd.to_reg(), mb_rru, InstSize::from_ty(ty)); + let rd = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::from_ty(ty)); let rn = show_vreg_element(rn, mb_rru, idx, ty); format!("{} {}, {}", op, rd, rn) } @@ -2699,7 +2704,7 @@ impl ShowWithRRU for Inst { _ => unimplemented!(), }; let rd = show_vreg_vector(rd.to_reg(), mb_rru, vector_type); - let rn = show_ireg_sized(rn, mb_rru, InstSize::from_ty(ty)); + let rn = show_ireg_sized(rn, mb_rru, OperandSize::from_ty(ty)); format!("dup {}, {}", rd, rn) } &Inst::VecDupFromFpu { rd, rn, ty } => { @@ -2813,12 +2818,12 @@ impl ShowWithRRU for Inst { // 32-to-64-bit extension, which is implemented with a "mov" to a // 32-bit (W-reg) dest, because this zeroes the top 32 bits. 
let dest_size = if !signed && from_bits == 32 && to_bits == 64 { - InstSize::Size32 + OperandSize::Size32 } else { - InstSize::from_bits(to_bits) + OperandSize::from_bits(to_bits) }; let rd = show_ireg_sized(rd.to_reg(), mb_rru, dest_size); - let rn = show_ireg_sized(rn, mb_rru, InstSize::from_bits(from_bits)); + let rn = show_ireg_sized(rn, mb_rru, OperandSize::from_bits(from_bits)); let op = match (signed, from_bits, to_bits) { (false, 8, 32) => "uxtb", (true, 8, 32) => "sxtb", @@ -2841,11 +2846,11 @@ impl ShowWithRRU for Inst { from_bits, to_bits, } if from_bits == 1 && signed => { - let dest_size = InstSize::from_bits(to_bits); + let dest_size = OperandSize::from_bits(to_bits); let zr = if dest_size.is32() { "wzr" } else { "xzr" }; - let rd32 = show_ireg_sized(rd.to_reg(), mb_rru, InstSize::Size32); + let rd32 = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32); let rd = show_ireg_sized(rd.to_reg(), mb_rru, dest_size); - let rn = show_ireg_sized(rn, mb_rru, InstSize::Size32); + let rn = show_ireg_sized(rn, mb_rru, OperandSize::Size32); format!("and {}, {}, #1 ; sub {}, {}, {}", rd32, rn, rd, zr, rd) } &Inst::Extend { @@ -2855,8 +2860,8 @@ impl ShowWithRRU for Inst { from_bits, .. } if from_bits == 1 && !signed => { - let rd = show_ireg_sized(rd.to_reg(), mb_rru, InstSize::Size32); - let rn = show_ireg_sized(rn, mb_rru, InstSize::Size32); + let rd = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32); + let rn = show_ireg_sized(rn, mb_rru, OperandSize::Size32); format!("and {}, {}, #1", rd, rn) } &Inst::Extend { .. } => { diff --git a/cranelift/codegen/src/isa/aarch64/inst/regs.rs b/cranelift/codegen/src/isa/aarch64/inst/regs.rs index b92b0b70c9..ba8d2e212a 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/regs.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/regs.rs @@ -1,7 +1,8 @@ //! AArch64 ISA definitions: registers. use crate::ir::types::*; -use crate::isa::aarch64::inst::InstSize; +use crate::isa::aarch64::inst::OperandSize; +use crate::isa::aarch64::inst::ScalarSize; use crate::machinst::*; use crate::settings; @@ -255,7 +256,7 @@ pub fn create_reg_universe(flags: &settings::Flags) -> RealRegUniverse { /// If `ireg` denotes an I64-classed reg, make a best-effort attempt to show /// its name at the 32-bit size. -pub fn show_ireg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, size: InstSize) -> String { +pub fn show_ireg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, size: OperandSize) -> String { let mut s = reg.show_rru(mb_rru); if reg.get_class() != RegClass::I64 || !size.is32() { // We can't do any better. @@ -277,15 +278,17 @@ pub fn show_ireg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, size: InstSiz } /// Show a vector register. 
-pub fn show_freg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, size: InstSize) -> String { +pub fn show_freg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, size: ScalarSize) -> String { let mut s = reg.show_rru(mb_rru); if reg.get_class() != RegClass::V128 { return s; } let prefix = match size { - InstSize::Size32 => "s", - InstSize::Size64 => "d", - InstSize::Size128 => "q", + ScalarSize::Size8 => "b", + ScalarSize::Size16 => "h", + ScalarSize::Size32 => "s", + ScalarSize::Size64 => "d", + ScalarSize::Size128 => "q", }; s.replace_range(0..1, prefix); s diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs index 664f2729a3..a24a6eec3d 100644 --- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs +++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs @@ -358,7 +358,7 @@ pub(crate) fn lower_insn_to_regs>( // The following checks must be done in 32-bit or 64-bit, depending // on the input type. Even though the initial div instruction is // always done in 64-bit currently. - let size = InstSize::from_ty(ty); + let size = OperandSize::from_ty(ty); // Check RHS is -1. ctx.emit(Inst::AluRRImm12 { alu_op: choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64), @@ -483,15 +483,15 @@ pub(crate) fn lower_insn_to_regs>( Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => { let ty = ty.unwrap(); - let size = InstSize::from_bits(ty_bits(ty)); let rd = get_output_reg(ctx, outputs[0]); if ty_bits(ty) < 128 { + let size = OperandSize::from_bits(ty_bits(ty)); let narrow_mode = match (op, size) { (Opcode::Ishl, _) => NarrowValueMode::None, - (Opcode::Ushr, InstSize::Size64) => NarrowValueMode::ZeroExtend64, - (Opcode::Ushr, InstSize::Size32) => NarrowValueMode::ZeroExtend32, - (Opcode::Sshr, InstSize::Size64) => NarrowValueMode::SignExtend64, - (Opcode::Sshr, InstSize::Size32) => NarrowValueMode::SignExtend32, + (Opcode::Ushr, OperandSize::Size64) => NarrowValueMode::ZeroExtend64, + (Opcode::Ushr, OperandSize::Size32) => NarrowValueMode::ZeroExtend32, + (Opcode::Sshr, OperandSize::Size64) => NarrowValueMode::SignExtend64, + (Opcode::Sshr, OperandSize::Size32) => NarrowValueMode::SignExtend32, _ => unreachable!(), }; let rn = put_input_in_reg(ctx, inputs[0], narrow_mode); @@ -1540,7 +1540,8 @@ pub(crate) fn lower_insn_to_regs>( } else if idx == 0 { ctx.emit(Inst::gen_move(rd, rn, ty)); } else { - ctx.emit(Inst::FpuMoveFromVec { rd, rn, idx, ty }); + let size = ScalarSize::from_ty(ty); + ctx.emit(Inst::FpuMoveFromVec { rd, rn, idx, size }); } } else { unreachable!();
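
As a quick illustration of the split described in the commit message (not part of the diff above), here is a self-contained Rust sketch of the two new enums and the accessors the emitter feeds from them. The enum bodies and the `sf_bit`/`ftype` encodings mirror args.rs as changed by this patch, but the `from_bits` signatures are simplified to take a plain `u16` instead of the generic bound used in Cranelift, and `main` is purely hypothetical usage, not code from the repository.

// Standalone sketch: simplified restatement of the enums added by this patch.
// In Cranelift these live in cranelift/codegen/src/isa/aarch64/inst/args.rs and
// work with `Type`/`ty_bits`; plain bit widths are used here to stay self-contained.
#![allow(dead_code)]

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum OperandSize {
    Size32,
    Size64,
}

impl OperandSize {
    /// Smallest GPR operand size that fits `bits` (1..=64).
    fn from_bits(bits: u16) -> OperandSize {
        assert!(bits <= 64);
        if bits <= 32 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        }
    }

    /// The `sf` bit (bit 31) selecting the 32- or 64-bit form of many A64
    /// integer instructions.
    fn sf_bit(self) -> u32 {
        match self {
            OperandSize::Size32 => 0,
            OperandSize::Size64 => 1,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ScalarSize {
    Size8,
    Size16,
    Size32,
    Size64,
    Size128,
}

impl ScalarSize {
    /// Smallest scalar SIMD & FP register size that fits `bits`.
    fn from_bits(bits: u16) -> ScalarSize {
        match bits.next_power_of_two() {
            8 => ScalarSize::Size8,
            16 => ScalarSize::Size16,
            32 => ScalarSize::Size32,
            64 => ScalarSize::Size64,
            128 => ScalarSize::Size128,
            _ => panic!("Unexpected type width"),
        }
    }

    /// The `ftype` field (bits 23:22) used by scalar FP data-processing instructions.
    fn ftype(self) -> u32 {
        match self {
            ScalarSize::Size16 => 0b11,
            ScalarSize::Size32 => 0b00,
            ScalarSize::Size64 => 0b01,
            _ => panic!("Unexpected scalar FP operand size"),
        }
    }
}

fn main() {
    // A 32-bit integer operation clears `sf`; a 64-bit one sets it.
    assert_eq!(OperandSize::from_bits(32).sf_bit(), 0);
    assert_eq!(OperandSize::from_bits(64).sf_bit(), 1);

    // A 24-bit value still needs a 32-bit scalar register.
    assert_eq!(ScalarSize::from_bits(24), ScalarSize::Size32);

    // The s- and d-register forms of fcmp differ only in `ftype`, which is why
    // enc_fcmp in this patch can OR `size.ftype() << 22` into one base word.
    let base = 0b000_11110_00_1_00000_00_1000_00000_00000_u32;
    let fcmp_s = base | (ScalarSize::Size32.ftype() << 22);
    let fcmp_d = base | (ScalarSize::Size64.ftype() << 22);
    assert_ne!(fcmp_s, fcmp_d);
}

The benefit of the split shows up in the two accessors: `sf_bit` no longer needs the catch-all panic arm that the old `InstSize` version carried for `Size128`, while `ftype` only has to reason about scalar SIMD & FP sizes.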