mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2025-09-24 23:28:41 +00:00
Added support for overloading intrinsics (atomics) based on pointers to
different address spaces. This alters the naming scheme for those intrinsics,
e.g., atomic.load.add.i32 => atomic.load.add.i32.p0i32

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@54195 91177308-0d34-0410-b5e6-96231b3b80d8
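A note on the new mangling (an editorial sketch, not part of the commit): the suffix reads as p&lt;address space&gt;&lt;pointee type&gt;, so .p0i32 means "i32 pointer in address space 0". Assuming the scheme generalizes beyond address space 0 as the example above implies, a hypothetical declaration for address space 1 would look like:

; Hypothetical: the same intrinsic overloaded on an addrspace(1) pointer,
; assuming the pN<type> mangling extends to non-default address spaces.
declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)* <ptr>, i32 <delta> )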
@@ -5788,14 +5788,15 @@ i1 <device> )
 <div class="doc_text">
 <h5>Syntax:</h5>
 <p>
-  This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
-  integer bit width. Not all targets support all bit widths however.</p>
+  This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on
+  any integer bit width and for different address spaces. Not all targets
+  support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.cmp.swap.i8( i8* <ptr>, i8 <cmp>, i8 <val> )
-declare i16 @llvm.atomic.cmp.swap.i16( i16* <ptr>, i16 <cmp>, i16 <val> )
-declare i32 @llvm.atomic.cmp.swap.i32( i32* <ptr>, i32 <cmp>, i32 <val> )
-declare i64 @llvm.atomic.cmp.swap.i64( i64* <ptr>, i64 <cmp>, i64 <val> )
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* <ptr>, i8 <cmp>, i8 <val> )
+declare i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* <ptr>, i16 <cmp>, i16 <val> )
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* <ptr>, i32 <cmp>, i32 <val> )
+declare i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* <ptr>, i64 <cmp>, i64 <val> )
 </pre>
 <h5>Overview:</h5>
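As a usage sketch (editorial, hedged: it assumes the 2.x-era IR syntax used throughout this document and the i32 declaration above), compare-and-swap is enough to build a lock-free increment; the loop retries until no other thread changed the value between the load and the swap:

; Minimal CAS-based atomic increment sketch.
declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32*, i32, i32 )

define void @atomic_inc(i32* %p) {
entry:
  %init = load i32* %p
  br label %retry
retry:
  %cur = phi i32 [ %init, %entry ], [ %seen, %retry ]
  %new = add i32 %cur, 1
  ; returns whatever was actually in memory at the time of the swap
  %seen = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %p, i32 %cur, i32 %new )
  %ok = icmp eq i32 %seen, %cur
  br i1 %ok, label %done, label %retry
done:
  ret void
}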
@@ -5827,13 +5828,13 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* <ptr>, i64 <cmp>, i64 <val> )
 store i32 4, %ptr

 %val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, i32 %val1 )
+%result1 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 4, i32 %val1 )
                                          <i>; yields {i32}:result1 = 4</i>
 %stored1 = icmp eq i32 %result1, 4       <i>; yields {i1}:stored1 = true</i>
 %memval1 = load i32* %ptr                <i>; yields {i32}:memval1 = 8</i>

 %val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, i32 %val2 )
+%result2 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 5, i32 %val2 )
                                          <i>; yields {i32}:result2 = 8</i>
 %stored2 = icmp eq i32 %result2, 5       <i>; yields {i1}:stored2 = false</i>
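An editorial gloss on the failed case above, for readers tracing the values:

; second call: memory holds 8, the <cmp> operand is 5
;   8 != 5  ->  no store occurs; the old value 8 is returned
;   hence %result2 = 8 and %stored2 = false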
@@ -5852,10 +5853,10 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* <ptr>, i64 <cmp>, i64 <val> )
 This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on any
 integer bit width. Not all targets support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.swap.i8( i8* <ptr>, i8 <val> )
-declare i16 @llvm.atomic.swap.i16( i16* <ptr>, i16 <val> )
-declare i32 @llvm.atomic.swap.i32( i32* <ptr>, i32 <val> )
-declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )
+declare i8 @llvm.atomic.swap.i8.p0i8( i8* <ptr>, i8 <val> )
+declare i16 @llvm.atomic.swap.i16.p0i16( i16* <ptr>, i16 <val> )
+declare i32 @llvm.atomic.swap.i32.p0i32( i32* <ptr>, i32 <val> )
+declare i64 @llvm.atomic.swap.i64.p0i64( i64* <ptr>, i64 <val> )
 </pre>
 <h5>Overview:</h5>
@@ -5886,13 +5887,13 @@ declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )
 store i32 4, %ptr

 %val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val1 )
+%result1 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val1 )
                                        <i>; yields {i32}:result1 = 4</i>
 %stored1 = icmp eq i32 %result1, 4     <i>; yields {i1}:stored1 = true</i>
 %memval1 = load i32* %ptr              <i>; yields {i32}:memval1 = 8</i>

 %val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val2 )
+%result2 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val2 )
                                        <i>; yields {i32}:result2 = 8</i>

 %stored2 = icmp eq i32 %result2, 8     <i>; yields {i1}:stored2 = true</i>
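Editorial sketch: atomic swap alone suffices for a test-and-set spinlock. This hypothetical snippet (assuming 0 = unlocked, 1 = locked, and the i32 declaration above) spins until the swap returns 0, i.e., until this thread is the one that flipped the lock:

declare i32 @llvm.atomic.swap.i32.p0i32( i32*, i32 )

define void @acquire(i32* %lock) {
entry:
  br label %spin
spin:
  ; atomically store 1; the returned old value says whether the lock was free
  %old = call i32 @llvm.atomic.swap.i32.p0i32( i32* %lock, i32 1 )
  %won = icmp eq i32 %old, 0
  br i1 %won, label %done, label %spin
done:
  ret void
}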
@@ -5911,10 +5912,10 @@ declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )
 This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
 integer bit width. Not all targets support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.load.add.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.add.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.add.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.add.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.add.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.add.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.add.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.add.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>
 <h5>Overview:</h5>
@@ -5941,11 +5942,11 @@ declare i64 @llvm.atomic.load.add.i64( i64* <ptr>, i64 <delta> )
 <pre>
 %ptr = malloc i32
 store i32 4, %ptr
-%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 4 )
                                 <i>; yields {i32}:result1 = 4</i>
-%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 2 )
                                 <i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 5 )
                                 <i>; yields {i32}:result3 = 10</i>
 %memval1 = load i32* %ptr       <i>; yields {i32}:memval1 = 15</i>
 </pre>
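Editorial sketch: because load.add returns the pre-increment value (4, 8, 10 in the example above), it can hand out unique IDs directly. A hypothetical helper, assuming the declaration shown earlier:

declare i32 @llvm.atomic.load.add.i32.p0i32( i32*, i32 )

define i32 @next_id(i32* %counter) {
  ; each caller receives a distinct pre-increment value
  %id = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %counter, i32 1 )
  ret i32 %id
}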
@@ -5960,12 +5961,13 @@ declare i64 @llvm.atomic.load.add.i64( i64* <ptr>, i64 <delta> )
 <h5>Syntax:</h5>
 <p>
   This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
-  any integer bit width. Not all targets support all bit widths however.</p>
+  any integer bit width and for different address spaces. Not all targets
+  support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.load.sub.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.sub.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.sub.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.sub.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.sub.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.sub.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.sub.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.sub.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>
 <h5>Overview:</h5>
@@ -5992,11 +5994,11 @@ declare i64 @llvm.atomic.load.sub.i64( i64* <ptr>, i64 <delta> )
 <pre>
 %ptr = malloc i32
 store i32 8, %ptr
-%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 4 )
                                 <i>; yields {i32}:result1 = 8</i>
-%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 2 )
                                 <i>; yields {i32}:result2 = 4</i>
-%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 5 )
                                 <i>; yields {i32}:result3 = 2</i>
 %memval1 = load i32* %ptr       <i>; yields {i32}:memval1 = -3</i>
 </pre>
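An editorial aside: fetch-and-sub is fetch-and-add of the negated delta, which is why the running values above mirror the load.add example. A sketch of the equivalence:

; Performed on the same initial memory, either call yields the same old
; value and leaves the same final memory contents.
%neg = sub i32 0, %delta
%old.a = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 %neg )
; ...has the same effect as...
%old.s = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 %delta )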
@@ -6015,37 +6017,37 @@ declare i64 @llvm.atomic.load.sub.i64( i64* <ptr>, i64 <delta> )
 <p>
-  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.and</tt>,
-  <tt>llvm.atomic.load.nand</tt>, <tt>llvm.atomic.load.or</tt>, and
-  <tt>llvm.atomic.load.xor</tt> on any integer bit width. Not all targets
-  support all bit widths however.</p>
+  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.and</tt>,
+  <tt>llvm.atomic.load.nand</tt>, <tt>llvm.atomic.load.or</tt>, and
+  <tt>llvm.atomic.load.xor</tt> on any integer bit width and for different
+  address spaces. Not all targets support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.load.and.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.and.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.and.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.and.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.and.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.and.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.and.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.and.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.or.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.or.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.or.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.or.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.or.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.or.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.or.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.or.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.nand.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.nand.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.nand.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.nand.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.nand.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.nand.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.nand.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.nand.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.xor.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.xor.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.xor.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.xor.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.xor.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.xor.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.xor.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.xor.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>
 <h5>Overview:</h5>
@@ -6074,13 +6076,13 @@ declare i64 @llvm.atomic.load.xor.i64( i64* <ptr>, i64 <delta> )
 <pre>
 %ptr = malloc i32
 store i32 0x0F0F, %ptr
-%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
+%result0 = call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %ptr, i32 0xFF )
                                 <i>; yields {i32}:result0 = 0x0F0F</i>
-%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
+%result1 = call i32 @llvm.atomic.load.and.i32.p0i32( i32* %ptr, i32 0xFF )
                                 <i>; yields {i32}:result1 = 0xFFFFFFF0</i>
-%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0x0F )
+%result2 = call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ptr, i32 0x0F )
                                 <i>; yields {i32}:result2 = 0xF0</i>
-%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0x0F )
+%result3 = call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %ptr, i32 0x0F )
                                 <i>; yields {i32}:result3 = 0xFF</i>
 %memval1 = load i32* %ptr       <i>; yields {i32}:memval1 = 0xF0</i>
 </pre>
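A worked note on the constants above (editorial): nand stores the complement of the AND, so after the first call memory holds ~(0x0F0F & 0xFF) = 0xFFFFFFF0, which is exactly the old value the subsequent load.and call returns. Step by step:

; stored values after each call (the call returns the value *before* it)
; nand: ~(0x0F0F & 0x00FF) = ~0x0000000F = 0xFFFFFFF0
; and:  0xFFFFFFF0 & 0xFF  = 0x000000F0
; or:   0xF0 | 0x0F        = 0xFF
; xor:  0xFF ^ 0x0F        = 0xF0   (the final memval1)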
@@ -6100,37 +6102,38 @@ declare i64 @llvm.atomic.load.xor.i64( i64* <ptr>, i64 <delta> )
 <p>
-  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.max</tt>,
-  <tt>llvm.atomic.load.min</tt>, <tt>llvm.atomic.load.umax</tt>, and
-  <tt>llvm.atomic.load.umin</tt> on any integer bit width. Not all targets
-  support all bit widths however.</p>
+  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.max</tt>,
+  <tt>llvm.atomic.load.min</tt>, <tt>llvm.atomic.load.umax</tt>, and
+  <tt>llvm.atomic.load.umin</tt> on any integer bit width and for different
+  address spaces. Not all targets support all bit widths however.</p>
 <pre>
-declare i8 @llvm.atomic.load.max.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.max.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.max.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.max.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.max.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.max.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.max.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.max.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.min.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.min.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.min.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.min.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.min.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.min.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.min.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.min.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.umax.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.umax.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.umax.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.umax.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.umax.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.umax.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.umax.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.umax.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>

 <pre>
-declare i8 @llvm.atomic.load.umin.i8( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.umin.i16( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.umin.i32( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.umin.i64( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.umin.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.umin.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.umin.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.umin.i64.p0i64( i64* <ptr>, i64 <delta> )
 </pre>
 <h5>Overview:</h5>
@@ -6159,13 +6162,13 @@ declare i64 @llvm.atomic.load.umin.i64( i64* <ptr>, i64 <delta> )
 <pre>
 %ptr = malloc i32
 store i32 7, %ptr
-%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
+%result0 = call i32 @llvm.atomic.load.min.i32.p0i32( i32* %ptr, i32 -2 )
                                 <i>; yields {i32}:result0 = 7</i>
-%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
+%result1 = call i32 @llvm.atomic.load.max.i32.p0i32( i32* %ptr, i32 8 )
                                 <i>; yields {i32}:result1 = -2</i>
-%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
+%result2 = call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %ptr, i32 10 )
                                 <i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
+%result3 = call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %ptr, i32 30 )
                                 <i>; yields {i32}:result3 = 8</i>
 %memval1 = load i32* %ptr       <i>; yields {i32}:memval1 = 30</i>
 </pre>
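A closing editorial note on signedness in the example above: min/max compare as signed, umin/umax as unsigned, so the same bit pattern can order differently under the two views:

; -2 is the i32 bit pattern 0xFFFFFFFE, hence:
;   signed:    min(-2, 10) = -2   (0xFFFFFFFE <s 0x0000000A)
;   unsigned: umin(-2, 10) = 10   (0xFFFFFFFE >u 0x0000000A)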