commit 13141f04d3
PowerPC uses itineraries to describe processor pipelines (and dispatch-group restrictions for P7/P8 cores). Unfortunately, the target-independent implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that looks for the largest cycle count in the pipeline for any given instruction. This, however, yields the wrong answer for the PPC itineraries, because we don't encode the full pipeline. Because the functional units are fully pipelined, we only model the initial stages (there are no relevant hazards in the later stages to model), and so the technique employed by getStageLatency does not really work. Instead, we should take the maximum output-operand latency, and that's what PPCInstrInfo::getInstrLatency now does.

This caused some test-case churn, including two unfortunate side effects. First, the new arrangement of copies we get from function parameters now sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the test cases), and we have one significant test-suite regression:

  SingleSource/Benchmarks/BenchmarkGame/spectral-norm
    56.4185% +/- 18.9398%

In this benchmark we have a loop with a vectorized FP divide, and with the new scheduling both divides end up in the same dispatch group (which in this case seems to cause a problem, although why is not exactly clear). The grouping structure is hard to predict from the bottom of the loop, and there may not be much we can do to fix this.

Very few other test-suite performance effects were really significant, but almost all weakly favor this change. However, in light of the issues highlighted above, I've left the old behavior available via a command-line flag.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242188 91177308-0d34-0410-b5e6-96231b3b80d8
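For reference, a condensed sketch of what "take the maximum output-operand latency" looks like in lib/Target/PowerPC/PPCInstrInfo.cpp. The function name is the one cited above; the flag name, guard, and exact body here are illustrative rather than a verbatim copy of the patch:

  // Sketch only: escape hatch back to the old getStageLatency-based
  // behavior; the option name is illustrative.
  static cl::opt<bool> UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden,
    cl::desc("Use the old (incorrect) instruction latency calculation"));

  unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                         const MachineInstr *MI,
                                         unsigned *PredCost) const {
    // Without itinerary data, or when asked to, fall back to the
    // target-independent computation.
    if (!ItinData || UseOldLatencyCalc)
      return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);

    // The itineraries model only the initial, in-order pipeline stages, so
    // the largest stage cycle count under-reports nothing useful here.
    // Instead, take the latest cycle at which any explicit def becomes
    // available.
    unsigned Latency = 1;
    unsigned DefClass = MI->getDesc().getSchedClass();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
        continue;

      int Cycle = ItinData->getOperandCycle(DefClass, i);
      if (Cycle < 0)
        continue;

      Latency = std::max(Latency, (unsigned) Cycle);
    }

    return Latency;
  }

Guarding the new computation behind a hidden flag keeps the old behavior reachable for comparison, which is what the note above about a command-line flag refers to.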
; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"

; Test back-to-back stores of TLS variables to ensure call sequences no
; longer overlap.

@__once_callable = external thread_local global i8**
@__once_call = external thread_local global void ()*

define i64 @call_once(i64 %flag, i8* %ptr) {
entry:
  %var = alloca i8*, align 8
  store i8* %ptr, i8** %var, align 8
  store i8** %var, i8*** @__once_callable, align 8
  store void ()* @__once_call_impl, void ()** @__once_call, align 8
  ret i64 %flag
}

; CHECK-LABEL: call_once:
; CHECK: addi 3, {{[0-9]+}}, __once_callable@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_callable@tlsgd)
; CHECK-NEXT: nop
; FIXME: We could check here for 'std {{[0-9]+}}, 0(3)', but that no longer
; works because, with new scheduling freedom, we create a copy of R3 based on the
; initial scheduling, but don't coalesce it again after we move the instructions
; so that the copy is no longer necessary.
; CHECK: addi 3, {{[0-9]+}}, __once_call@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_call@tlsgd)
; CHECK-NEXT: nop
; FIXME: We don't really need the copy here either, we could move the store up.
; CHECK: mr [[REG1:[0-9]+]], 3
; CHECK: std {{[0-9]+}}, 0([[REG1]])

declare void @__once_call_impl()