diff --git a/docs/LangRef.rst b/docs/LangRef.rst index 8fd8b3f02b1..1a854695501 100644 --- a/docs/LangRef.rst +++ b/docs/LangRef.rst @@ -837,8 +837,16 @@ example: If a function that has an ``sspreq`` attribute is inlined into a function that doesn't have an ``sspreq`` attribute or which has an - ``ssp`` attribute, then the resulting function will have an - ``sspreq`` attribute. + ``ssp`` or ``sspstrong`` attribute, then the resulting function will have + an ``sspreq`` attribute. +``sspstrong`` + This attribute indicates that the function should emit a stack smashing + protector. Currently this attribute has the same effect as + ``sspreq``. This overrides the ``ssp`` function attribute. + + If a function that has an ``sspstrong`` attribute is inlined into a + function that doesn't have an ``sspstrong`` attribute, then the + resulting function will have an ``sspstrong`` attribute. ``uwtable`` This attribute indicates that the ABI being targeted requires that an unwind table entry be produce for this function even if we can diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h index f2fe7645774..5b7422ff666 100644 --- a/include/llvm-c/Core.h +++ b/include/llvm-c/Core.h @@ -173,10 +173,11 @@ typedef enum { LLVMUWTable = 1 << 30, LLVMNonLazyBind = 1 << 31 - /* FIXME: This attribute is currently not included in the C API as + /* FIXME: These attributes are currently not included in the C API as a temporary measure until the API/ABI impact to the C API is understood and the path forward agreed upon. - LLVMAddressSafety = 1ULL << 32 + LLVMAddressSafety = 1ULL << 32, + LLVMStackProtectStrongAttribute = 1ULL<<33 */ } LLVMAttribute; diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h index de214d17c6d..813718fdf77 100644 --- a/include/llvm/IR/Attributes.h +++ b/include/llvm/IR/Attributes.h @@ -90,6 +90,7 @@ public: ///< alignstack=(1)) StackProtect, ///< Stack protection. StackProtectReq, ///< Stack protection required. + StackProtectStrong, ///< Strong Stack protection. 
StructRet, ///< Hidden pointer to structure to return UWTable, ///< Function must be in a unwind table ZExt, ///< Zero extended before/after call @@ -463,6 +464,7 @@ public: .removeAttribute(Attribute::OptimizeForSize) .removeAttribute(Attribute::StackProtect) .removeAttribute(Attribute::StackProtectReq) + .removeAttribute(Attribute::StackProtectStrong) .removeAttribute(Attribute::NoRedZone) .removeAttribute(Attribute::NoImplicitFloat) .removeAttribute(Attribute::Naked) diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp index 2efa1f01caf..85e7574c97e 100644 --- a/lib/AsmParser/LLLexer.cpp +++ b/lib/AsmParser/LLLexer.cpp @@ -549,6 +549,7 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(optsize); KEYWORD(ssp); KEYWORD(sspreq); + KEYWORD(sspstrong); KEYWORD(noredzone); KEYWORD(noimplicitfloat); KEYWORD(naked); diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index fea5ec84f9d..0eb6023272c 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -956,6 +956,7 @@ bool LLParser::ParseOptionalFuncAttrs(AttrBuilder &B) { case lltok::kw_returns_twice: B.addAttribute(Attribute::ReturnsTwice); break; case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break; case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break; + case lltok::kw_sspstrong: B.addAttribute(Attribute::StackProtectStrong); break; case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break; case lltok::kw_noduplicate: B.addAttribute(Attribute::NoDuplicate); break; @@ -1050,11 +1051,11 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) { case lltok::kw_readonly: case lltok::kw_inlinehint: case lltok::kw_alwaysinline: case lltok::kw_optsize: case lltok::kw_ssp: case lltok::kw_sspreq: - case lltok::kw_noredzone: case lltok::kw_noimplicitfloat: - case lltok::kw_naked: case lltok::kw_nonlazybind: - case lltok::kw_address_safety: case lltok::kw_minsize: - case lltok::kw_alignstack: case lltok::kw_align: - case lltok::kw_noduplicate: + case lltok::kw_sspstrong: case lltok::kw_noimplicitfloat: + case lltok::kw_noredzone: case lltok::kw_naked: + case lltok::kw_nonlazybind: case lltok::kw_address_safety: + case lltok::kw_minsize: case lltok::kw_alignstack: + case lltok::kw_align: case lltok::kw_noduplicate: HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute"); break; } diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h index 5b4d415d8ed..fea5f75c48f 100644 --- a/lib/AsmParser/LLToken.h +++ b/lib/AsmParser/LLToken.h @@ -110,6 +110,7 @@ namespace lltok { kw_optsize, kw_ssp, kw_sspreq, + kw_sspstrong, kw_noredzone, kw_noimplicitfloat, kw_naked, diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp index e2428044171..049efc1c861 100644 --- a/lib/CodeGen/StackProtector.cpp +++ b/lib/CodeGen/StackProtector.cpp @@ -141,6 +141,12 @@ bool StackProtector::RequiresStackProtector() const { Attribute::StackProtectReq)) return true; + // FIXME: Dummy SSP-strong implementation. Default to required until + // strong heuristic is implemented. 
+ if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectStrong)) + return true; + if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Attribute::StackProtect)) return false; diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp index 4bd2391dfdb..964a404bffc 100644 --- a/lib/IR/Attributes.cpp +++ b/lib/IR/Attributes.cpp @@ -206,6 +206,8 @@ std::string Attribute::getAsString() const { Result += "ssp "; if (hasAttribute(Attribute::StackProtectReq)) Result += "sspreq "; + if (hasAttribute(Attribute::StackProtectStrong)) + Result += "sspstrong "; if (hasAttribute(Attribute::NoRedZone)) Result += "noredzone "; if (hasAttribute(Attribute::NoImplicitFloat)) @@ -487,6 +489,7 @@ uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) { case Attribute::AddressSafety: return 1ULL << 32; case Attribute::MinSize: return 1ULL << 33; case Attribute::NoDuplicate: return 1ULL << 34; + case Attribute::StackProtectStrong: return 1ULL << 35; } llvm_unreachable("Unsupported attribute type"); } diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index f4688614342..50bfef5127a 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -499,6 +499,7 @@ void CppWriter::printAttributes(const AttributeSet &PAL, HANDLE_ATTR(OptimizeForSize); HANDLE_ATTR(StackProtect); HANDLE_ATTR(StackProtectReq); + HANDLE_ATTR(StackProtectStrong); HANDLE_ATTR(NoCapture); HANDLE_ATTR(NoRedZone); HANDLE_ATTR(NoImplicitFloat); diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp index 2187a2a8eee..663ddb75f42 100644 --- a/lib/Transforms/IPO/Inliner.cpp +++ b/lib/Transforms/IPO/Inliner.cpp @@ -72,6 +72,40 @@ void Inliner::getAnalysisUsage(AnalysisUsage &AU) const { typedef DenseMap > InlinedArrayAllocasTy; +/// \brief If the inlined function had a higher stack protection level than the +/// calling function, then bump up the caller's stack protection level. +static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) { + // If upgrading the SSP attribute, clear out the old SSP Attributes first. + // Having multiple SSP attributes doesn't actually hurt, but it adds useless + // clutter to the IR. + AttrBuilder B; + B.addAttribute(Attribute::StackProtect) + .addAttribute(Attribute::StackProtectStrong); + AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(), + AttributeSet::FunctionIndex, + B); + AttributeSet CallerAttr = Caller->getAttributes(), + CalleeAttr = Callee->getAttributes(); + + if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq)) { + Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); + Caller->addFnAttr(Attribute::StackProtectReq); + } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectStrong) && + !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq)) { + Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); + Caller->addFnAttr(Attribute::StackProtectStrong); + } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtect) && + !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq) && + !CallerAttr.hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectStrong)) + Caller->addFnAttr(Attribute::StackProtect); +} + /// InlineCallIfPossible - If it is possible to inline the specified call site, /// do so and update the CallGraph for this operation. 
/// @@ -91,16 +125,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI, if (!InlineFunction(CS, IFI, InsertLifetime)) return false; - // If the inlined function had a higher stack protection level than the - // calling function, then bump up the caller's stack protection level. - if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, - Attribute::StackProtectReq)) - Caller->addFnAttr(Attribute::StackProtectReq); - else if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, - Attribute::StackProtect) && - !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex, - Attribute::StackProtectReq)) - Caller->addFnAttr(Attribute::StackProtect); + AdjustCallerSSPLevel(Caller, Callee); // Look at all of the allocas that we inlined through this call site. If we // have already inlined other allocas through other calls into this function, diff --git a/test/CodeGen/X86/stack-protector.ll b/test/CodeGen/X86/stack-protector.ll index c07511443bc..afe5ab81bc5 100644 --- a/test/CodeGen/X86/stack-protector.ll +++ b/test/CodeGen/X86/stack-protector.ll @@ -1,28 +1,635 @@ -; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | grep %gs: -; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | grep %fs: -; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | grep %gs: -; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | grep "__stack_chk_guard" -; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | grep "__stack_chk_fail" +; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-I386 %s +; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-X64 %s +; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-KERNEL-X64 %s +; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | FileCheck --check-prefix=DARWIN-X64 %s +; FIXME: Update and expand test when strong heuristic is implemented. -@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <[11 x i8]*> [#uses=1] +%struct.foo = type { [16 x i8] } +%struct.foo.0 = type { [4 x i8] } -define void @test(i8* %a) nounwind ssp { +@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1 + +; test1a: array of [16 x i8] +; no ssp attribute +; Requires no protector. 
+define void @test1a(i8* %a) nounwind uwtable { entry: - %a_addr = alloca i8* ; [#uses=2] - %buf = alloca [8 x i8] ; <[8 x i8]*> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] - store i8* %a, i8** %a_addr - %buf1 = bitcast [8 x i8]* %buf to i8* ; [#uses=1] - %0 = load i8** %a_addr, align 4 ; [#uses=1] - %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind ; [#uses=0] - %buf2 = bitcast [8 x i8]* %buf to i8* ; [#uses=1] - %2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; [#uses=0] - br label %return +; LINUX-I386: test1a: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc -return: ; preds = %entry - ret void +; LINUX-X64: test1a: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test1a: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test1a: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %buf = alloca [16 x i8], align 16 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void } -declare i8* @strcpy(i8*, i8*) nounwind +; test1b: array of [16 x i8] +; ssp attribute +; Requires protector. +define void @test1b(i8* %a) nounwind uwtable ssp { +entry: +; LINUX-I386: test1b: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail -declare i32 @printf(i8*, ...) nounwind +; LINUX-X64: test1b: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test1b: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test1b: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %buf = alloca [16 x i8], align 16 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test1c: array of [16 x i8] +; sspstrong attribute +; Requires protector. 
+define void @test1c(i8* %a) nounwind uwtable sspstrong { +entry: +; LINUX-I386: test1c: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test1c: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test1c: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test1c: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %buf = alloca [16 x i8], align 16 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test1d: array of [16 x i8] +; sspreq attribute +; Requires protector. +define void @test1d(i8* %a) nounwind uwtable sspreq { +entry: +; LINUX-I386: test1d: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test1d: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test1d: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test1d: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %buf = alloca [16 x i8], align 16 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test2a: struct { [16 x i8] } +; no ssp attribute +; Requires no protector. +define void @test2a(i8* %a) nounwind uwtable { +entry: +; LINUX-I386: test2a: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test2a: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test2a: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test2a: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test2b: struct { [16 x i8] } +; ssp attribute +; Requires protector. 
+define void @test2b(i8* %a) nounwind uwtable ssp { +entry: +; LINUX-I386: test2b: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test2b: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test2b: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test2b: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test2c: struct { [16 x i8] } +; sspstrong attribute +; Requires protector. +define void @test2c(i8* %a) nounwind uwtable sspstrong { +entry: +; LINUX-I386: test2c: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test2c: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test2c: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test2c: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test2d: struct { [16 x i8] } +; sspreq attribute +; Requires protector. +define void @test2d(i8* %a) nounwind uwtable sspreq { +entry: +; LINUX-I386: test2d: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test2d: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test2d: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test2d: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test3a: array of [4 x i8] +; no ssp attribute +; Requires no protector. 
+define void @test3a(i8* %a) nounwind uwtable { +entry: +; LINUX-I386: test3a: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test3a: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test3a: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test3a: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %buf = alloca [4 x i8], align 1 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test3b: array [4 x i8] +; ssp attribute +; Requires no protector. +define void @test3b(i8* %a) nounwind uwtable ssp { +entry: +; LINUX-I386: test3b: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test3b: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test3b: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test3b: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %buf = alloca [4 x i8], align 1 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test3c: array of [4 x i8] +; sspstrong attribute +; Requires protector. +define void @test3c(i8* %a) nounwind uwtable sspstrong { +entry: +; LINUX-I386: test3c: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test3c: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test3c: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test3c: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %buf = alloca [4 x i8], align 1 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test3d: array of [4 x i8] +; sspreq attribute +; Requires protector. 
+define void @test3d(i8* %a) nounwind uwtable sspreq { +entry: +; LINUX-I386: test3d: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test3d: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test3d: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test3d: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %buf = alloca [4 x i8], align 1 + store i8* %a, i8** %a.addr, align 8 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1) + ret void +} + +; test4a: struct { [4 x i8] } +; no ssp attribute +; Requires no protector. +define void @test4a(i8* %a) nounwind uwtable { +entry: +; LINUX-I386: test4a: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test4a: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test4a: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test4a: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo.0, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test4b: struct { [4 x i8] } +; ssp attribute +; Requires no protector. +define void @test4b(i8* %a) nounwind uwtable ssp { +entry: +; LINUX-I386: test4b: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test4b: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test4b: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test4b: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo.0, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test4c: struct { [4 x i8] } +; sspstrong attribute +; Requires protector. 
+define void @test4c(i8* %a) nounwind uwtable sspstrong { +entry: +; LINUX-I386: test4c: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test4c: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test4c: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test4c: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo.0, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test4d: struct { [4 x i8] } +; sspreq attribute +; Requires protector. +define void @test4d(i8* %a) nounwind uwtable sspreq { +entry: +; LINUX-I386: test4d: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test4d: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test4d: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test4d: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + %b = alloca %struct.foo.0, align 1 + store i8* %a, i8** %a.addr, align 8 + %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0 + %0 = load i8** %a.addr, align 8 + %call = call i8* @strcpy(i8* %arraydecay, i8* %0) + %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0 + %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2) + ret void +} + +; test5a: no arrays / no nested arrays +; no ssp attribute +; Requires no protector. +define void @test5a(i8* %a) nounwind uwtable { +entry: +; LINUX-I386: test5a: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test5a: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test5a: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test5a: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + store i8* %a, i8** %a.addr, align 8 + %0 = load i8** %a.addr, align 8 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0) + ret void +} + +; test5b: no arrays / no nested arrays +; ssp attribute +; Requires no protector. 
+define void @test5b(i8* %a) nounwind uwtable ssp { +entry: +; LINUX-I386: test5b: +; LINUX-I386-NOT: calll __stack_chk_fail +; LINUX-I386: .cfi_endproc + +; LINUX-X64: test5b: +; LINUX-X64-NOT: callq __stack_chk_fail +; LINUX-X64: .cfi_endproc + +; LINUX-KERNEL-X64: test5b: +; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail +; LINUX-KERNEL-X64: .cfi_endproc + +; DARWIN-X64: test5b: +; DARWIN-X64-NOT: callq ___stack_chk_fail +; DARWIN-X64: .cfi_endproc + %a.addr = alloca i8*, align 8 + store i8* %a, i8** %a.addr, align 8 + %0 = load i8** %a.addr, align 8 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0) + ret void +} + +; test5c: no arrays / no nested arrays +; sspstrong attribute +; Requires protector. +; FIXME: Once strong heuristic is implemented, this should _not_ require +; a protector +define void @test5c(i8* %a) nounwind uwtable sspstrong { +entry: +; LINUX-I386: test5c: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test5c: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test5c: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test5c: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + store i8* %a, i8** %a.addr, align 8 + %0 = load i8** %a.addr, align 8 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0) + ret void +} + +; test5d: no arrays / no nested arrays +; sspreq attribute +; Requires protector. +define void @test5d(i8* %a) nounwind uwtable sspreq { +entry: +; LINUX-I386: test5d: +; LINUX-I386: mov{{l|q}} %gs: +; LINUX-I386: calll __stack_chk_fail + +; LINUX-X64: test5d: +; LINUX-X64: mov{{l|q}} %fs: +; LINUX-X64: callq __stack_chk_fail + +; LINUX-KERNEL-X64: test5d: +; LINUX-KERNEL-X64: mov{{l|q}} %gs: +; LINUX-KERNEL-X64: callq __stack_chk_fail + +; DARWIN-X64: test5d: +; DARWIN-X64: mov{{l|q}} ___stack_chk_guard +; DARWIN-X64: callq ___stack_chk_fail + %a.addr = alloca i8*, align 8 + store i8* %a, i8** %a.addr, align 8 + %0 = load i8** %a.addr, align 8 + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0) + ret void +} + +declare i8* @strcpy(i8*, i8*) +declare i32 @printf(i8*, ...) diff --git a/test/Transforms/Inline/inline_ssp.ll b/test/Transforms/Inline/inline_ssp.ll new file mode 100644 index 00000000000..d9996b30aab --- /dev/null +++ b/test/Transforms/Inline/inline_ssp.ll @@ -0,0 +1,155 @@ +; RUN: opt -inline %s -S | FileCheck %s +; Ensure SSP attributes are propagated correctly when inlining. + +@.str = private unnamed_addr constant [11 x i8] c"fun_nossp\0A\00", align 1 +@.str1 = private unnamed_addr constant [9 x i8] c"fun_ssp\0A\00", align 1 +@.str2 = private unnamed_addr constant [15 x i8] c"fun_sspstrong\0A\00", align 1 +@.str3 = private unnamed_addr constant [12 x i8] c"fun_sspreq\0A\00", align 1 + +; These first four functions (@fun_sspreq, @fun_sspstrong, @fun_ssp, @fun_nossp) +; are used by the remaining functions to ensure that the SSP attributes are +; propagated correctly. 
The caller should have its SSP attribute set as: +; strictest(caller-ssp-attr, callee-ssp-attr), where strictness is ordered as: +; sspreq > sspstrong > ssp > [no ssp] +define internal void @fun_sspreq() nounwind uwtable sspreq { +entry: + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str3, i32 0, i32 0)) + ret void +} + +define internal void @fun_sspstrong() nounwind uwtable sspstrong { +entry: + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8]* @.str2, i32 0, i32 0)) + ret void +} + +define internal void @fun_ssp() nounwind uwtable ssp { +entry: + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str1, i32 0, i32 0)) + ret void +} + +define internal void @fun_nossp() nounwind uwtable { +entry: + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0)) + ret void +} + +; Tests start below + +define void @inline_req_req() nounwind uwtable sspreq { +entry: +; CHECK: @inline_req_req() nounwind uwtable sspreq + call void @fun_sspreq() + ret void +} + +define void @inline_req_strong() nounwind uwtable sspstrong { +entry: +; CHECK: @inline_req_strong() nounwind uwtable sspreq + call void @fun_sspreq() + ret void +} + +define void @inline_req_ssp() nounwind uwtable ssp { +entry: +; CHECK: @inline_req_ssp() nounwind uwtable sspreq + call void @fun_sspreq() + ret void +} + +define void @inline_req_nossp() nounwind uwtable { +entry: +; CHECK: @inline_req_nossp() nounwind uwtable sspreq + call void @fun_sspreq() + ret void +} + +define void @inline_strong_req() nounwind uwtable sspreq { +entry: +; CHECK: @inline_strong_req() nounwind uwtable sspreq + call void @fun_sspstrong() + ret void +} + + +define void @inline_strong_strong() nounwind uwtable sspstrong { +entry: +; CHECK: @inline_strong_strong() nounwind uwtable sspstrong + call void @fun_sspstrong() + ret void +} + +define void @inline_strong_ssp() nounwind uwtable ssp { +entry: +; CHECK: @inline_strong_ssp() nounwind uwtable sspstrong + call void @fun_sspstrong() + ret void +} + +define void @inline_strong_nossp() nounwind uwtable { +entry: +; CHECK: @inline_strong_nossp() nounwind uwtable sspstrong + call void @fun_sspstrong() + ret void +} + +define void @inline_ssp_req() nounwind uwtable sspreq { +entry: +; CHECK: @inline_ssp_req() nounwind uwtable sspreq + call void @fun_ssp() + ret void +} + + +define void @inline_ssp_strong() nounwind uwtable sspstrong { +entry: +; CHECK: @inline_ssp_strong() nounwind uwtable sspstrong + call void @fun_ssp() + ret void +} + +define void @inline_ssp_ssp() nounwind uwtable ssp { +entry: +; CHECK: @inline_ssp_ssp() nounwind uwtable ssp + call void @fun_ssp() + ret void +} + +define void @inline_ssp_nossp() nounwind uwtable { +entry: +; CHECK: @inline_ssp_nossp() nounwind uwtable ssp + call void @fun_ssp() + ret void +} + +define void @inline_nossp_req() nounwind uwtable sspreq { +entry: +; CHECK: @inline_nossp_req() nounwind uwtable sspreq + call void @fun_nossp() + ret void +} + + +define void @inline_nossp_strong() nounwind uwtable sspstrong { +entry: +; CHECK: @inline_nossp_strong() nounwind uwtable sspstrong + call void @fun_nossp() + ret void +} + +define void @inline_nossp_ssp() nounwind uwtable ssp { +entry: +; CHECK: @inline_nossp_ssp() nounwind uwtable ssp + call void @fun_nossp() + ret void +} + +define void @inline_nossp_nossp() nounwind uwtable { +entry: +; CHECK: @inline_nossp_nossp() nounwind uwtable + call void @fun_nossp() + ret void +} + +declare i32 @printf(i8*, 
...)
diff --git a/utils/kate/llvm.xml b/utils/kate/llvm.xml
index 074fa16cb88..1778cfce384 100644
--- a/utils/kate/llvm.xml
+++ b/utils/kate/llvm.xml
@@ -90,6 +90,7 @@
       <item> readonly </item>
       <item> ssp </item>
       <item> sspreq </item>
+      <item> sspstrong </item>
     </list>
     <list name="types">
       <item> float </item>
diff --git a/utils/vim/llvm.vim b/utils/vim/llvm.vim
index 753549fd725..cea86eee5b3 100644
--- a/utils/vim/llvm.vim
+++ b/utils/vim/llvm.vim
@@ -51,10 +51,10 @@ syn keyword llvmKeyword noimplicitfloat noinline nonlazybind noredzone noreturn
 syn keyword llvmKeyword nounwind optsize personality private protected
 syn keyword llvmKeyword ptx_device ptx_kernel readnone readonly release
 syn keyword llvmKeyword returns_twice section seq_cst sideeffect signext
-syn keyword llvmKeyword singlethread spir_func spir_kernel sret ssp sspreq tail
-syn keyword llvmKeyword target thread_local to triple unnamed_addr unordered
-syn keyword llvmKeyword uwtable volatile weak weak_odr x86_fastcallcc
-syn keyword llvmKeyword x86_stdcallcc x86_thiscallcc zeroext
+syn keyword llvmKeyword singlethread spir_func spir_kernel sret ssp sspreq
+syn keyword llvmKeyword sspstrong tail target thread_local to triple
+syn keyword llvmKeyword unnamed_addr unordered uwtable volatile weak weak_odr
+syn keyword llvmKeyword x86_fastcallcc x86_stdcallcc x86_thiscallcc zeroext
 
 " Obsolete keywords.
 syn keyword llvmError getresult begin end
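
The LangRef hunk above says only that ``sspstrong`` currently has the same effect as ``sspreq`` and overrides ``ssp``. As a minimal usage sketch (not part of the patch; the function and global names are illustrative), the new keyword is written on a function definition exactly like the existing SSP attributes, condensing the test1c case from stack-protector.ll:

; Sketch in the same 2013-era IR syntax as the tests above.  Until the strong
; heuristic lands (see the FIXME in StackProtector.cpp), sspstrong simply
; forces a protector, so this function gets a stack guard and a
; __stack_chk_fail call on all four targets exercised by the test.
@fmt = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1

define void @copy_sspstrong(i8* %a) nounwind uwtable sspstrong {
entry:
  %buf = alloca [16 x i8], align 16
  %p = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
  %call = call i8* @strcpy(i8* %p, i8* %a)
  %print = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @fmt, i32 0, i32 0), i8* %p)
  ret void
}

declare i8* @strcpy(i8*, i8*)
declare i32 @printf(i8*, ...)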
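
AdjustCallerSSPLevel in the Inliner.cpp hunk, together with the comment in inline_ssp.ll, gives an inlined-into caller strictest(caller-ssp-attr, callee-ssp-attr), ordered sspreq > sspstrong > ssp > no attribute, and drops the weaker attribute it replaces. A before/after sketch of one such case (hypothetical names; the expectation mirrors @inline_strong_ssp in test/Transforms/Inline/inline_ssp.ll):

; Input to "opt -inline -S": the caller carries only ssp, the callee sspstrong.
define internal void @callee_strong() nounwind uwtable sspstrong {
entry:
  ret void
}

define void @caller_ssp() nounwind uwtable ssp {
entry:
  call void @callee_strong()
  ret void
}

; After inlining, AdjustCallerSSPLevel removes the old ssp attribute and adds
; the stricter one, so the caller now reads roughly:
;   define void @caller_ssp() nounwind uwtable sspstrong { ... }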