llvm-project/llvm/test/CodeGen/X86/stack-protector-msvc-oz.ll
Omair Javaid e1e1836bbd
[CodeGen] Inline stack guard check on Windows (#136290)
This patch optimizes the Windows security cookie check mechanism by
moving the comparison inline and calling __security_check_cookie only
when the check fails. This avoids the overhead of a DLL call on every
function return.
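
Schematically, on x86-64 the epilogue check becomes something like the
following (the stack-slot offset, register choice, and the .Lfail label
are illustrative, not taken from this test's output):

  movq  16(%rsp), %rcx                 # reload the XORed cookie from its slot
  xorq  %rsp, %rcx                     # recover the original cookie value
  cmpq  __security_cookie(%rip), %rcx  # compare against the live cookie
  jne   .Lfail                         # mismatch: the cookie was corrupted
  retq
.Lfail:
  callq __security_check_cookie        # cold path: report the failure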

Previously, we implemented this optimization through a machine pass
(X86WinFixupBufferSecurityCheckPass) in PR #95904, submitted by
@mahesh-attarde. We have reverted that pass in favor of this new
approach. We have also abandoned the AArch64-specific implementation
of the same pass in PR #121938 in favor of this more general solution.

The old machine instruction pass approach:
- Scanned the generated code to find __security_check_cookie calls
- Modified these calls by splitting basic blocks
- Added comparison logic and conditional branching
- Required complex block management and live register computation

The new approach:
- Implements the same optimization during instruction selection
- Directly emits the comparison and conditional branching
- No need for post-processing or basic block manipulation
- Disables the optimization at -Oz (see the note after this list).
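
At -Oz the inline sequence above would grow every protected epilogue by
a compare, a branch, and a cold block, so the shorter pre-existing form
is kept; that is the shape the CHECK lines below verify (offset
illustrative):

  movq  16(%rsp), %rcx           # reload and decode the cookie
  xorq  %rsp, %rcx
  callq __security_check_cookie  # unconditional call on every return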

Thanks @tamaspetz, @efriedma-quic and @arsenm for their help.
2025-06-12 19:38:42 +05:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X86 %s
; RUN: llc -mtriple=x86_64-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X64 %s
; Make sure fastisel falls back and does something secure.
; RUN: llc -mtriple=i686-pc-windows-msvc -O0 < %s -o - | FileCheck -check-prefix=MSVC-X86-O0 %s
; RUN: llc -mtriple=x86_64-pc-windows-msvc -O0 < %s -o - | FileCheck -check-prefix=MSVC-X64-O0 %s
@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <ptr> [#uses=1]
define void @test(ptr %a) nounwind ssp minsize {
; MSVC-X86-LABEL: test:
; MSVC-X86: # %bb.0: # %entry
; MSVC-X86-NEXT: pushl %esi
; MSVC-X86-NEXT: subl $12, %esp
; MSVC-X86-NEXT: movl ___security_cookie, %eax
; MSVC-X86-NEXT: xorl %esp, %eax
; MSVC-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; MSVC-X86-NEXT: movl %esp, %esi
; MSVC-X86-NEXT: pushl {{[0-9]+}}(%esp)
; MSVC-X86-NEXT: pushl %esi
; MSVC-X86-NEXT: calll _strcpy
; MSVC-X86-NEXT: popl %ecx
; MSVC-X86-NEXT: popl %edx
; MSVC-X86-NEXT: pushl %esi
; MSVC-X86-NEXT: pushl $LC
; MSVC-X86-NEXT: calll _printf
; MSVC-X86-NEXT: popl %ecx
; MSVC-X86-NEXT: popl %edx
; MSVC-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; MSVC-X86-NEXT: xorl %esp, %ecx
; MSVC-X86-NEXT: calll @__security_check_cookie@4
; MSVC-X86-NEXT: addl $12, %esp
; MSVC-X86-NEXT: popl %esi
; MSVC-X86-NEXT: retl
;
; MSVC-X64-LABEL: test:
; MSVC-X64: # %bb.0: # %entry
; MSVC-X64-NEXT: pushq %rsi
; MSVC-X64-NEXT: subq $64, %rsp
; MSVC-X64-NEXT: movq %rcx, %rdx
; MSVC-X64-NEXT: movq __security_cookie(%rip), %rax
; MSVC-X64-NEXT: xorq %rsp, %rax
; MSVC-X64-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; MSVC-X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
; MSVC-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; MSVC-X64-NEXT: movq %rsi, %rcx
; MSVC-X64-NEXT: callq strcpy
; MSVC-X64-NEXT: leaq LC(%rip), %rcx
; MSVC-X64-NEXT: movq %rsi, %rdx
; MSVC-X64-NEXT: callq printf
; MSVC-X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; MSVC-X64-NEXT: xorq %rsp, %rcx
; MSVC-X64-NEXT: callq __security_check_cookie
; MSVC-X64-NEXT: addq $64, %rsp
; MSVC-X64-NEXT: popq %rsi
; MSVC-X64-NEXT: retq
;
; MSVC-X86-O0-LABEL: test:
; MSVC-X86-O0: # %bb.0: # %entry
; MSVC-X86-O0-NEXT: subl $20, %esp
; MSVC-X86-O0-NEXT: movl {{[0-9]+}}(%esp), %eax
; MSVC-X86-O0-NEXT: movl ___security_cookie, %eax
; MSVC-X86-O0-NEXT: xorl %esp, %eax
; MSVC-X86-O0-NEXT: movl %eax, {{[0-9]+}}(%esp)
; MSVC-X86-O0-NEXT: movl {{[0-9]+}}(%esp), %ecx
; MSVC-X86-O0-NEXT: movl %esp, %eax
; MSVC-X86-O0-NEXT: movl %ecx, 4(%eax)
; MSVC-X86-O0-NEXT: leal {{[0-9]+}}(%esp), %ecx
; MSVC-X86-O0-NEXT: movl %ecx, (%eax)
; MSVC-X86-O0-NEXT: calll _strcpy
; MSVC-X86-O0-NEXT: leal LC, %ecx
; MSVC-X86-O0-NEXT: leal {{[0-9]+}}(%esp), %eax
; MSVC-X86-O0-NEXT: movl %ecx, (%esp)
; MSVC-X86-O0-NEXT: movl %eax, {{[0-9]+}}(%esp)
; MSVC-X86-O0-NEXT: calll _printf
; MSVC-X86-O0-NEXT: # %bb.1: # %return
; MSVC-X86-O0-NEXT: movl {{[0-9]+}}(%esp), %ecx
; MSVC-X86-O0-NEXT: xorl %esp, %ecx
; MSVC-X86-O0-NEXT: calll @__security_check_cookie@4
; MSVC-X86-O0-NEXT: addl $20, %esp
; MSVC-X86-O0-NEXT: retl
;
; MSVC-X64-O0-LABEL: test:
; MSVC-X64-O0: # %bb.0: # %entry
; MSVC-X64-O0-NEXT: subq $56, %rsp
; MSVC-X64-O0-NEXT: movq __security_cookie(%rip), %rax
; MSVC-X64-O0-NEXT: xorq %rsp, %rax
; MSVC-X64-O0-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; MSVC-X64-O0-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
; MSVC-X64-O0-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; MSVC-X64-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
; MSVC-X64-O0-NEXT: callq strcpy
; MSVC-X64-O0-NEXT: leaq LC(%rip), %rcx
; MSVC-X64-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
; MSVC-X64-O0-NEXT: callq printf
; MSVC-X64-O0-NEXT: # %bb.1: # %return
; MSVC-X64-O0-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; MSVC-X64-O0-NEXT: xorq %rsp, %rcx
; MSVC-X64-O0-NEXT: callq __security_check_cookie
; MSVC-X64-O0-NEXT: addq $56, %rsp
; MSVC-X64-O0-NEXT: retq
entry:
  %a_addr = alloca ptr                            ; <ptr> [#uses=2]
  %buf = alloca [8 x i8]                          ; <ptr> [#uses=2]
  store ptr %a, ptr %a_addr
  %0 = load ptr, ptr %a_addr, align 4             ; <ptr> [#uses=1]
  %1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind ; <ptr> [#uses=0]
  %2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind ; <i32> [#uses=0]
  br label %return

return:                                           ; preds = %entry
  ret void
}
declare ptr @strcpy(ptr, ptr) nounwind
declare i32 @printf(ptr, ...) nounwind