diff --git a/lib/Target/ARM64/ARM64CallingConvention.td b/lib/Target/ARM64/ARM64CallingConvention.td
index 9ac888ff242..f86c9c714ac 100644
--- a/lib/Target/ARM64/ARM64CallingConvention.td
+++ b/lib/Target/ARM64/ARM64CallingConvention.td
@@ -36,7 +36,7 @@ def CC_ARM64_AAPCS : CallingConv<[
                                                     [X0, X1, X3, X5]>>>,
 
   // i128 is split to two i64s, and its stack alignment is 16 bytes.
-  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
+  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, X7>>>,
 
   CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                           [W0, W1, W2, W3, W4, W5, W6, W7]>>,
diff --git a/test/CodeGen/ARM64/aapcs.ll b/test/CodeGen/ARM64/aapcs.ll
index 27d2aa7b773..fc1266ccfed 100644
--- a/test/CodeGen/ARM64/aapcs.ll
+++ b/test/CodeGen/ARM64/aapcs.ll
@@ -84,3 +84,13 @@ define void @test_variadic() {
 ; CHECK: bl variadic
   ret void
 }
+
+; We weren't marking x7 as used after deciding that the i128 didn't fit into
+; registers and putting the first half on the stack, so the *second* half went
+; into x7. Yuck!
+define i128 @test_i128_shadow([7 x i64] %x0_x6, i128 %sp) {
+; CHECK-LABEL: test_i128_shadow:
+; CHECK: ldp x0, x1, [sp]
+
+  ret i128 %sp
+}