From fc430a662ff3a3b9075558d32ebe00ffe1a23f35 Mon Sep 17 00:00:00 2001
From: Eli Friedman
Date: Tue, 9 Aug 2011 22:17:39 +0000
Subject: [PATCH] Fix a couple ridiculous copy-paste errors. rdar://9914773 .

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137160 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrCompiler.td     |  4 +-
 test/CodeGen/X86/lock-inst-encoding.ll | 55 ++++++++++++++++++++------
 2 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 17464f380bc..745adc6e54d 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -630,8 +630,8 @@ def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
 defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
 defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
 defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
-defm LOCK_AND : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM4m, "and">;
-defm LOCK_XOR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM6m, "xor">;
+defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
+defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
 
 // Optimized codegen when the non-memory output is not used.
 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
diff --git a/test/CodeGen/X86/lock-inst-encoding.ll b/test/CodeGen/X86/lock-inst-encoding.ll
index 2d10fbc611d..6e93cf998b8 100644
--- a/test/CodeGen/X86/lock-inst-encoding.ll
+++ b/test/CodeGen/X86/lock-inst-encoding.ll
@@ -3,19 +3,52 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.0.0"
 
-; CHECK: f0:
-; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,
+; CHECK: f1:
+; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x01,0x37]
 ; CHECK: ret
-define void @f0(i64* %a0) nounwind {
-  %t0 = and i64 1, 1
-  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind
-  %1 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %a0, i64 %t0) nounwind
-  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind
+define void @f1(i64* %a, i64 %b) nounwind {
+  call i64 @llvm.atomic.load.add.i64.p0i64(i64* %a, i64 %b) nounwind
   ret void
 }
 
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32* nocapture, i32) nounwind
-
 declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
+
+; CHECK: f2:
+; CHECK: subq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x29,0x37]
+; CHECK: ret
+define void @f2(i64* %a, i64 %b) nounwind {
+  call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %a, i64 %b) nounwind
+  ret void
+}
+
+declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind
+
+; CHECK: f3:
+; CHECK: andq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x21,0x37]
+; CHECK: ret
+define void @f3(i64* %a, i64 %b) nounwind {
+  call i64 @llvm.atomic.load.and.i64.p0i64(i64* %a, i64 %b) nounwind
+  ret void
+}
+
+declare i64 @llvm.atomic.load.and.i64.p0i64(i64* nocapture, i64) nounwind
+
+; CHECK: f4:
+; CHECK: orq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x09,0x37]
+; CHECK: ret
+define void @f4(i64* %a, i64 %b) nounwind {
+  call i64 @llvm.atomic.load.or.i64.p0i64(i64* %a, i64 %b) nounwind
+  ret void
+}
+
+declare i64 @llvm.atomic.load.or.i64.p0i64(i64* nocapture, i64) nounwind
+
+; CHECK: f5:
+; CHECK: xorq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x31,0x37]
+; CHECK: ret
+define void @f5(i64* %a, i64 %b) nounwind {
+  call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %a, i64 %b) nounwind
+  ret void
+}
+
+declare i64 @llvm.atomic.load.xor.i64.p0i64(i64* nocapture, i64) nounwind
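
For reference on the fix itself: the first argument to LOCK_ArithBinOp is the x86 base opcode for each instruction (ADD 0x00, OR 0x08, AND 0x20, SUB 0x28, XOR 0x30), and the old table had copy-pasted OR's 0x08 into the AND and XOR entries, so locked andq/xorq were silently encoded as locked orq. Below is a minimal sketch (Python, not part of the patch) of how each corrected base opcode combines with the LOCK prefix, the REX.W prefix, and the ModRM byte into the byte sequences the new CHECK lines expect; the %rsi, (%rdi) operand choice is an assumption that matches the trailing 0x37 ModRM byte in the tests.

# Sketch: byte encoding of "lock <op>q %rsi, (%rdi)" for each fixed opcode.
LOCK = 0xF0    # LOCK prefix, the first byte every CHECK line expects
REX_W = 0x48   # REX.W prefix: 64-bit operand size
# ModRM: mod=00 (memory operand, no displacement), reg=6 (%rsi), rm=7 (%rdi)
MODRM = (0b00 << 6) | (6 << 3) | 7   # == 0x37

# Base opcodes from the corrected LOCK_ArithBinOp defs; the r/m64,r64
# (register-to-memory) form of each instruction is base opcode + 1.
bases = {"add": 0x00, "sub": 0x28, "or": 0x08, "and": 0x20, "xor": 0x30}

for name, base in bases.items():
    encoding = [LOCK, REX_W, base + 1, MODRM]
    print(f"lock {name}q:", [hex(b) for b in encoding])

Running this reproduces each sequence the updated test checks, e.g. [0xf0, 0x48, 0x21, 0x37] for andq and [0xf0, 0x48, 0x31, 0x37] for xorq, whereas the pre-fix table yielded opcode 0x09 (orq) for both.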