
Merge pull request from GHSA-ff4p-7xrq-q5r8

* x64: Remove incorrect `amode_add` lowering rules

This commit removes two incorrect rules from the x64 backend's
computation of addressing modes. These rules folded a zero-extended
32-bit computation into the addressing-mode operand. That is not
correct: the 32-bit computation should wrap (truncate) to 32 bits, but
once folded into the addressing-mode computation it is performed with
64-bit operands, so the truncation never happens (a concrete sketch
follows the commit message below).

* Add release notes for 5.0.1
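
To make the miscompilation concrete, here is a minimal Rust sketch (not from the patch; the index value and shift amount are hypothetical) contrasting the correct semantics of `uextend(ishl.i32 x, 1)` with what the folded 64-bit addressing-mode arithmetic computes:

```rust
fn main() {
    // Hypothetical 32-bit index whose top bit is set.
    let index: u32 = 0x8000_0000;

    // Correct semantics: the i32 shift wraps at 32 bits, and only then
    // is the result zero-extended to 64 bits.
    let correct = (index << 1) as u64; // 0x0

    // The removed rules zero-extended first and then shifted with
    // 64-bit operands, so the 32-bit truncation never happened.
    let folded = (index as u64) << 1; // 0x1_0000_0000

    assert_ne!(correct, folded);
}
```

The difference (here 0x1_0000_0000) becomes part of the effective address, which is what makes the bug a guest-controlled out-of-bounds access.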
Branch: release-5.0.0
Author: Alex Crichton (committed via GitHub)
Commit: bc6b2de30a
Changed files:
  1. RELEASES.md (14)
  2. cranelift/codegen/src/isa/x64/inst.isle (14)
  3. cranelift/filetests/filetests/isa/x64/amode-opt.clif (9)

RELEASES.md

@@ -1,5 +1,19 @@
 --------------------------------------------------------------------------------
+## 5.0.1
+
+Released 2023-03-08.
+
+### Fixed
+
+* Guest-controlled out-of-bounds read/write on x86\_64
+  [GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
+
+* Miscompilation of `i8x16.select` with the same inputs on x86\_64
+  [GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
+
+--------------------------------------------------------------------------------
+
 ## 5.0.0
 
 Released 2023-01-20.

cranelift/codegen/src/isa/x64/inst.isle

@@ -975,20 +975,6 @@
 (rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
       (if (u32_lteq (u8_as_u32 shift) 3))
       (Amode.ImmRegRegShift off base index shift flags))
-(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (uextend (ishl index (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags))
-
-;; Same, but with a uextend of a shift of a 32-bit add. This is valid
-;; because we know our lowering of a narrower-than-64-bit `iadd` will
-;; always write the full register width, so we can effectively ignore
-;; the `uextend` and look through it to the `ishl`.
-;;
-;; Priority 3 to avoid conflict with the previous rule.
-(rule 3 (amode_add (Amode.ImmReg off (valid_reg base) flags)
-                   (uextend (ishl index @ (iadd _ _) (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base index shift flags))

 ;; -- Case 4 (absorbing constant offsets).
 ;;
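
The comment on the second removed rule was the flawed justification: a 32-bit `addl` does zero the upper half of the destination register, but the truncation that matters is the one performed by the `ishl` itself, which the folded scale skips. A hedged Rust sketch of that rule's pattern, `uextend(ishl(iadd x y, 2))`, with hypothetical operand values:

```rust
fn main() {
    // Hypothetical 32-bit operands of the folded `iadd`.
    let (x, y): (u32, u32) = (0x7fff_ffff, 0x4000_0001);

    // The 32-bit add itself is fine: `addl` wraps and zeroes the
    // upper 32 bits of the destination register.
    let sum = x.wrapping_add(y); // 0xc000_0000

    // Correct semantics: the i32 shift also wraps at 32 bits before
    // the zero-extension.
    let correct = (sum << 2) as u64; // 0x0

    // The removed rule let the addressing mode scale the 64-bit
    // register instead, preserving bits that should have been dropped.
    let folded = (sum as u64) << 2; // 0x3_0000_0000

    assert_ne!(correct, folded);
}
```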

cranelift/filetests/filetests/isa/x64/amode-opt.clif

@@ -132,8 +132,9 @@ block0(v0: i64, v1: i32):
 ; pushq %rbp
 ; movq %rsp, %rbp
 ; block0:
-; movl %esi, %ecx
-; movq -1(%rdi,%rcx,8), %rax
+; movq %rsi, %rdx
+; shll $3, %edx, %edx
+; movq -1(%rdi,%rdx,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
@@ -155,8 +156,8 @@ block0(v0: i64, v1: i32, v2: i32):
 ; block0:
 ; movq %rsi, %r8
 ; addl %r8d, %edx, %r8d
-; movq -1(%rdi,%r8,4), %rax
+; shll $2, %r8d, %r8d
+; movq -1(%rdi,%r8,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
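
The updated expectations reflect the fix: the shift is now emitted as an explicit 32-bit `shll`, which wraps, and the addressing mode uses scale 1 rather than folding the shift. A sketch (hypothetical function name and values, modeling only the address arithmetic of the second hunk) of why the corrected sequence is sound:

```rust
// Models `movq %rsi, %r8; addl %r8d, %edx, %r8d; shll $2, %r8d, %r8d;
// movq -1(%rdi,%r8,1), %rax` at the level of address arithmetic.
fn corrected_addr(base: u64, v1: u32, v2: u32) -> u64 {
    let idx = v1.wrapping_add(v2); // addl: wraps at 32 bits
    let idx = idx.wrapping_shl(2); // shll $2: also wraps at 32 bits
    // -1(%rdi,%r8,1): base + zero-extended index, scale 1, disp -1.
    base.wrapping_add(idx as u64).wrapping_sub(1)
}

fn main() {
    // With the operands from the sketch above, the index contribution
    // is now correctly truncated to zero.
    assert_eq!(corrected_addr(0x1000, 0x7fff_ffff, 0x4000_0001), 0xfff);
}
```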
