5c9bb7119a
Moving these patterns from TableGen files to PerformDAGCombine() should
allow us to generate better code by eliminating unnecessary shifts and
extensions earlier.

This also fixes a bug where the MAD pattern was calling
SimplifyDemandedBits with a 24-bit mask on the first operand even when
the full pattern wasn't being matched. This occasionally resulted in
some instructions being incorrectly deleted from the program.

v2:
  - Fix bug with 64-bit mul

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205731 91177308-0d34-0410-b5e6-96231b3b80d8
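For context, here is a minimal sketch of how a target DAGCombine hook can
recognize the shl/ashr sign-extension idiom exercised by the test below.
This is not the code from this commit: the helper isI24, the opcode
AMDGPUISD::MUL_I24, and the predicate Subtarget->hasMulI24() are assumed
names used for illustration only.

    // Sketch only: decide whether Op is effectively a sign-extended
    // 24-bit value, e.g. (ashr (shl x, 8), 8) on an i32.
    static bool isI24(SDValue Op, SelectionDAG &DAG) {
      EVT VT = Op.getValueType();
      // ComputeNumSignBits sees through the shl/ashr pair, so no
      // explicit pattern match on the shifts is needed here.
      return VT.getSizeInBits() >= 24 &&
             VT.getSizeInBits() - DAG.ComputeNumSignBits(Op) < 24;
    }

    SDValue AMDGPUTargetLowering::PerformDAGCombine(
        SDNode *N, DAGCombinerInfo &DCI) const {
      SelectionDAG &DAG = DCI.DAG;
      SDLoc DL(N);
      switch (N->getOpcode()) {
      default:
        break;
      case ISD::MUL: {
        EVT VT = N->getValueType(0);
        SDValue N0 = N->getOperand(0);
        SDValue N1 = N->getOperand(1);
        if (VT.isVector() || VT.getSizeInBits() > 32)
          break;
        // Only rewrite once the *whole* pattern has matched; calling
        // SimplifyDemandedBits on one operand before that point is the
        // kind of bug this commit message describes.
        if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG))
          return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, N0, N1);
        break;
      }
      }
      return SDValue();
    }

Because the combine runs on the SelectionDAG rather than during TableGen
instruction selection, the redundant shifts feeding the multiply can be
eliminated before they are ever materialized as machine instructions.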
24 lines
788 B
LLVM
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC

; FUNC-LABEL: @i32_mad24
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG: MULLO_INT
; Make sure we aren't masking the inputs.
; CM-NOT: AND
; CM: MULADD_INT24
; SI-NOT: AND
; SI: V_MAD_I32_I24
define void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = ashr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = ashr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, i32 addrspace(1)* %out
  ret void
}