commit 5c9bb7119a

Moving these patterns from TableGen files to PerformDAGCombine() should allow us to generate better code by eliminating unnecessary shifts and extensions earlier. This also fixes a bug where the MAD pattern was calling SimplifyDemandedBits with a 24-bit mask on the first operand even when the full pattern wasn't being matched. This occasionally resulted in some instructions being incorrectly deleted from the program.

v2:
  - Fix bug with 64-bit mul

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205731 91177308-0d34-0410-b5e6-96231b3b80d8
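For reference, the MAD pattern mentioned above matches a 24-bit multiply feeding an add. A minimal IR sketch of that shape, assuming the combine recognizes the same shl/ashr sign-extension idiom used in the test below (the function name @i32_mad24 and the value names are illustrative, not taken from this commit):

; Sign-extend both operands from 24 bits via shl/ashr, multiply,
; then add a third operand -- the shape the MAD combine looks for.
; (Illustrative sketch; not part of this test file.)
define void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  %a.shl = shl i32 %a, 8
  %a.24  = ashr i32 %a.shl, 8
  %b.shl = shl i32 %b, 8
  %b.24  = ashr i32 %b.shl, 8
  %mul   = mul i32 %a.24, %b.24
  %mad   = add i32 %mul, %c
  store i32 %mad, i32 addrspace(1)* %out
  ret void
}

On targets with 24-bit multiply-add support, this shape would be expected to select a fused instruction (e.g. MULADD_INT24 on Cayman or V_MAD_I32_I24 on SI) rather than a full 32-bit multiply followed by an add.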
23 lines · 755 B · LLVM
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC

; FUNC-LABEL: @i32_mul24
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG: MULLO_INT
; Make sure we are not masking the inputs
; CM-NOT: AND
; CM: MUL_INT24
; SI-NOT: AND
; SI: V_MUL_I32_I24
define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = ashr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = ashr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
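The v2 note above ("Fix bug with 64-bit mul") indicates the combine also has to handle i64 multiplies correctly. A hedged sketch of such a case, assuming sign-extension from 24 bits is expressed with the same shl/ashr idiom at i64 width (illustrative only, not part of this test file):

; i64 multiply of two values sign-extended from 24 bits (64 - 40 = 24);
; the combine must account for the 64-bit result width here rather than
; assuming a 32-bit multiply.
define void @i64_mul24(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  %a.shl = shl i64 %a, 40
  %a.24  = ashr i64 %a.shl, 40
  %b.shl = shl i64 %b, 40
  %b.24  = ashr i64 %b.shl, 40
  %mul   = mul i64 %a.24, %b.24
  store i64 %mul, i64 addrspace(1)* %out
  ret void
}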