Added support for overloading intrinsics (atomics) based on pointers to
different address spaces. This alters the naming scheme for those
intrinsics, e.g., atomic.load.add.i32 => atomic.load.add.i32.p0i32
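A minimal sketch of the new mangling in LLVM assembly (the addrspace(1) variant is illustrative only, and assumes a target that supports atomics in that address space):

    ; Old name: overloaded on the integer width only.
    declare i32 @llvm.atomic.load.add.i32( i32*, i32 )

    ; New name: ".p" + address space + pointee type is appended.
    declare i32 @llvm.atomic.load.add.i32.p0i32( i32*, i32 )

    ; Hypothetical variant for a pointer in address space 1.
    declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)*, i32 )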


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@54195 91177308-0d34-0410-b5e6-96231b3b80d8
Mon P Wang 2008-07-30 04:36:53 +00:00
parent 1fbffe0cef
commit e3b3a7241c
13 changed files with 209 additions and 137 deletions


@ -5788,14 +5788,15 @@ i1 <device> )
<div class="doc_text">
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on
any integer bit width and for different address spaces. Not all targets
support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.cmp.swap.i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
declare i16 @llvm.atomic.cmp.swap.i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
declare i32 @llvm.atomic.cmp.swap.i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
declare i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
declare i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
declare i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
</pre>
<h5>Overview:</h5>
@ -5827,13 +5828,13 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &l
store i32 4, %ptr
%val1 = add i32 4, 4
%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, %val1 )
%result1 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 4, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, %val2 )
%result2 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 5, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 5 <i>; yields {i1}:stored2 = false</i>
@ -5852,10 +5853,10 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &l
This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.swap.i8( i8* &lt;ptr&gt;, i8 &lt;val&gt; )
declare i16 @llvm.atomic.swap.i16( i16* &lt;ptr&gt;, i16 &lt;val&gt; )
declare i32 @llvm.atomic.swap.i32( i32* &lt;ptr&gt;, i32 &lt;val&gt; )
declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
declare i8 @llvm.atomic.swap.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;val&gt; )
declare i16 @llvm.atomic.swap.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;val&gt; )
declare i32 @llvm.atomic.swap.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;val&gt; )
declare i64 @llvm.atomic.swap.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
</pre>
<h5>Overview:</h5>
@ -5886,13 +5887,13 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
store i32 4, %ptr
%val1 = add i32 4, 4
%result1 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val1 )
%result1 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
%result2 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val2 )
%result2 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 8 <i>; yields {i1}:stored2 = true</i>
@ -5911,10 +5912,10 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.add.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.add.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.add.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.add.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.add.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.add.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.add.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<h5>Overview:</h5>
@ -5941,11 +5942,11 @@ declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 4, %ptr
%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
%result1 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 4</i>
%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
%result2 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 8</i>
%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
%result3 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 10</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
</pre>
@ -5960,12 +5961,13 @@ declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
any integer bit width. Not all targets support all bit widths however.</p>
any integer bit width and for different address spaces. Not all targets
support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.sub.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.sub.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.sub.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.sub.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.sub.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.sub.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.sub.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<h5>Overview:</h5>
@ -5992,11 +5994,11 @@ declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 8, %ptr
%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
%result1 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 8</i>
%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
%result2 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 4</i>
%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
%result3 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 2</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = -3</i>
</pre>
@ -6015,37 +6017,37 @@ declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load_and</tt>,
<tt>llvm.atomic.load_nand</tt>, <tt>llvm.atomic.load_or</tt>, and
<tt>llvm.atomic.load_xor</tt> on any integer bit width. Not all targets
support all bit widths however.</p>
<tt>llvm.atomic.load_xor</tt> on any integer bit width and for different
address spaces. Not all targets support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.and.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.and.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.and.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.and.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.and.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.and.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.and.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.and.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.or.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.or.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.or.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.or.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.or.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.or.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.or.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.or.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.nand.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.nand.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.nand.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.nand.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.nand.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.nand.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.nand.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.nand.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.xor.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.xor.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.xor.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.xor.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.xor.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.xor.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.xor.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<h5>Overview:</h5>
@ -6074,13 +6076,13 @@ declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 0x0F0F, %ptr
%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
%result0 = call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result0 = 0x0F0F</i>
%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
%result1 = call i32 @llvm.atomic.load.and.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result1 = 0xFFFFFFF0</i>
%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0F )
%result2 = call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ptr, i32 0x0F )
<i>; yields {i32}:result2 = 0xF0</i>
%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0F )
%result3 = call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %ptr, i32 0x0F )
<i>; yields {i32}:result3 = FF</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = F0</i>
</pre>
@ -6100,37 +6102,38 @@ declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load_max</tt>,
<tt>llvm.atomic.load_min</tt>, <tt>llvm.atomic.load_umax</tt>, and
<tt>llvm.atomic.load_umin</tt> on any integer bit width. Not all targets
<tt>llvm.atomic.load_umin</tt> on any integer bit width and for different
address spaces. Not all targets
support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.max.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.max.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.max.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.max.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.max.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.max.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.max.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.max.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.min.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.min.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.min.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.min.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.min.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.min.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.min.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.min.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.umax.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.umax.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.umax.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.umax.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.umax.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.umax.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.umax.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.umax.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
declare i8 @llvm.atomic.load.umin.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.umin.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.umin.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.umin.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
declare i8 @llvm.atomic.load.umin.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
declare i16 @llvm.atomic.load.umin.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
declare i32 @llvm.atomic.load.umin.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
declare i64 @llvm.atomic.load.umin.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<h5>Overview:</h5>
@ -6159,13 +6162,13 @@ declare i64 @llvm.atomic.load.umin.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 7, %ptr
%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
%result0 = call i32 @llvm.atomic.load.min.i32.p0i32( i32* %ptr, i32 -2 )
<i>; yields {i32}:result0 = 7</i>
%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
%result1 = call i32 @llvm.atomic.load.max.i32.p0i32( i32* %ptr, i32 8 )
<i>; yields {i32}:result1 = -2</i>
%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
%result2 = call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %ptr, i32 10 )
<i>; yields {i32}:result2 = 8</i>
%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
%result3 = call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %ptr, i32 30 )
<i>; yields {i32}:result3 = 8</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 30</i>
</pre>
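All of the LangRef examples above stay in the default address space. A minimal sketch of the same kind of call through a pointer in address space 1 (the function name @add_in_as1 and the choice of address space are illustrative assumptions):

    define i32 @add_in_as1(i32 addrspace(1)* %ptr) nounwind {
    entry:
      ; atomically add 1 to *%ptr and return the old value
      %old = call i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)* %ptr, i32 1 )
      ret i32 %old
    }

    declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)*, i32 ) nounwind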


@ -70,6 +70,11 @@ namespace llvm {
LAST_VALUETYPE = 27, // This always remains at the end of the list.
// iPTRAny - An int value the size of the pointer on the current target,
// pointing to any address space. This must only be used internally by
// tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
iPTRAny = 252,
// fAny - Any floating-point or vector floating-point value. This is used
// for intrinsics that have overloadings based on floating-point types.
// This is only for tblgen's consumption!


@ -49,6 +49,10 @@ def v3f32 : ValueType<96 , 24>; // 3 x f32 vector value
def v4f32 : ValueType<128, 25>; // 4 x f32 vector value
def v2f64 : ValueType<128, 26>; // 2 x f64 vector value
// Pseudo valuetype mapped to the current pointer size to any address space.
// Should only be used in TableGen.
def iPTRAny : ValueType<0, 252>;
// Pseudo valuetype to represent "float of any format"
def fAny : ValueType<0 , 253>;


@ -64,6 +64,11 @@ class LLVMPointerType<LLVMType elty>
LLVMType ElTy = elty;
}
class LLVMAnyPointerType<LLVMType elty>
: LLVMType<iPTRAny>{
LLVMType ElTy = elty;
}
class LLVMMatchType<int num>
: LLVMType<OtherVT>{
int Number = num;
@ -84,6 +89,7 @@ def llvm_f128_ty : LLVMType<f128>;
def llvm_ppcf128_ty : LLVMType<ppcf128>;
def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8*
def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8**
def llvm_anyptr_ty : LLVMAnyPointerType&lt;llvm_i8_ty&gt;; // (space)i8*
def llvm_empty_ty : LLVMType<OtherVT>; // { }
def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
@ -271,62 +277,62 @@ def int_memory_barrier : Intrinsic<[llvm_void_ty, llvm_i1_ty, llvm_i1_ty,
llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>;
def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>, LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_val_compare_and_swap">;
def int_atomic_load_add : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_add">;
def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_lock_test_and_set">;
def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_sub">;
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_and">;
def int_atomic_load_or : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_or">;
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_xor">;
def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_nand">;
def int_atomic_load_min : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_min">;
def int_atomic_load_max : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_max">;
def int_atomic_load_umin : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_umin">;
def int_atomic_load_umax : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_umax">;


@ -40,24 +40,38 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
switch (Name[5]) {
default: break;
case 'a':
// This upgrades the llvm.atomic.lcs, llvm.atomic.las, and llvm.atomic.lss
// to their new function name
if (Name.compare(5,8,"atomic.l",8) == 0) {
// This upgrades the llvm.atomic.lcs, llvm.atomic.las, and llvm.atomic.lss
// intrinsics, as well as atomics with a default address space, to their
// new names (e.g. llvm.atomic.load.add.i32 => llvm.atomic.load.add.i32.p0i32)
if (Name.compare(5,7,"atomic.",7) == 0) {
if (Name.compare(12,3,"lcs",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.cmp.swap"+Name.substr(delim));
F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"las",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.load.add"+Name.substr(delim));
F->setName("llvm.atomic.load.add"+Name.substr(delim)
+ ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"lss",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.load.sub"+Name.substr(delim));
F->setName("llvm.atomic.load.sub"+Name.substr(delim)
+ ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
else if (Name.rfind(".p") == std::string::npos) {
// We don't have an address space qualifier, so this has to be upgraded
// to the new name: copy the type suffix at the end of the intrinsic
// and append it after ".p0".
std::string::size_type delim = Name.find_last_of('.');
assert(delim != std::string::npos && "can not find type");
F->setName(Name + ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
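A sketch of the renames the upgrade code above performs on old modules; the commented-out names are examples of the old spellings, and the declarations show what they are rewritten to:

    ; llvm.atomic.las.i32  (old load-add spelling) becomes:
    declare i32 @llvm.atomic.load.add.i32.p0i32( i32*, i32 )
    ; llvm.atomic.lss.i32  (old load-sub spelling) becomes:
    declare i32 @llvm.atomic.load.sub.i32.p0i32( i32*, i32 )
    ; llvm.atomic.lcs.i32  (old compare-and-swap spelling) becomes:
    declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32*, i32, i32 )
    ; a current name without an address-space qualifier, e.g.
    ; llvm.atomic.swap.i32, simply gains ".p0" plus its type suffix:
    declare i32 @llvm.atomic.swap.i32.p0i32( i32*, i32 )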


@ -328,9 +328,14 @@ std::string Intrinsic::getName(ID id, const Type **Tys, unsigned numTys) {
if (numTys == 0)
return Table[id];
std::string Result(Table[id]);
for (unsigned i = 0; i < numTys; ++i)
if (Tys[i])
for (unsigned i = 0; i < numTys; ++i) {
if (const PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) {
Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) +
MVT::getMVT(PTyp->getElementType()).getMVTString();
}
else if (Tys[i])
Result += "." + MVT::getMVT(Tys[i]).getMVTString();
}
return Result;
}


@ -1327,7 +1327,6 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID,
unsigned Count, ...) {
va_list VA;
va_start(VA, Count);
const FunctionType *FTy = F->getFunctionType();
// For overloaded intrinsics, the Suffix of the function name must match the
@ -1423,6 +1422,21 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID,
else
CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is not a "
"pointer and a pointer is required.", F);
}
} else if (VT == MVT::iPTRAny) {
// Outside of TableGen, we don't distinguish iPTRAny (a pointer to any
// address space) from iPTR. In the verifier, we cannot tell which case
// we have, so we allow either to be legal.
if (const PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
MVT::getMVT(PTyp->getElementType()).getMVTString();
} else {
if (ArgNo == 0)
CheckFailed("Intrinsic result type is not a "
"pointer and a pointer is required.", F);
else
CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is not a "
"pointer and a pointer is required.", F);
break;
}
} else if (MVT((MVT::SimpleValueType)VT).isVector()) {
@ -1456,17 +1470,21 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID,
va_end(VA);
// If we computed a Suffix then the intrinsic is overloaded and we need to
// make sure that the name of the function is correct. We add the suffix to
// the name of the intrinsic and compare against the given function name. If
// they are not the same, the function name is invalid. This ensures that
// overloading of intrinsics uses a sane and consistent naming convention.
// For intrinsics without pointer arguments, if we computed a Suffix then the
// intrinsic is overloaded and we need to make sure that the name of the
// function is correct. We add the suffix to the name of the intrinsic and
// compare against the given function name. If they are not the same, the
// function name is invalid. This ensures that overloading of intrinsics
// uses a sane and consistent naming convention. Note that intrinsics with
// pointer arguments may or may not be overloaded, so we check the name both
// with and without the suffix.
if (!Suffix.empty()) {
std::string Name(Intrinsic::getName(ID));
if (Name + Suffix != F->getName())
if (Name + Suffix != F->getName()) {
CheckFailed("Overloaded intrinsic has incorrect suffix: '" +
F->getName().substr(Name.length()) + "'. It should be '" +
Suffix + "'", F);
}
}
// Check parameter attributes.
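As a worked instance of the iPTRAny branch above: for a pointer parameter of type i16 addrspace(3)* the computed suffix fragment is ".p3i16" (address space 3 is an arbitrary illustration), so the correctly-suffixed declaration would be:

    declare i16 @llvm.atomic.load.and.i16.p3i16( i16 addrspace(3)*, i16 )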


@ -29,65 +29,65 @@ entry:
store i32 3855, i32* %xort
store i32 4, i32* %temp
%tmp = load i32* %temp ; <i32> [#uses=1]
call i32 @llvm.atomic.load.add.i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
store i32 %0, i32* %old
call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
store i32 %1, i32* %old
call i32 @llvm.atomic.load.add.i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
store i32 %2, i32* %old
call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
store i32 %3, i32* %old
call i32 @llvm.atomic.load.and.i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
store i32 %4, i32* %old
call i32 @llvm.atomic.load.or.i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
store i32 %5, i32* %old
call i32 @llvm.atomic.load.xor.i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
store i32 %6, i32* %old
call i32 @llvm.atomic.load.min.i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
store i32 %7, i32* %old
%neg = sub i32 0, 1 ; <i32> [#uses=1]
call i32 @llvm.atomic.load.min.i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
store i32 %8, i32* %old
call i32 @llvm.atomic.load.max.i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
store i32 %9, i32* %old
call i32 @llvm.atomic.load.max.i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
store i32 %10, i32* %old
call i32 @llvm.atomic.load.umax.i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
store i32 %11, i32* %old
call i32 @llvm.atomic.load.umax.i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
store i32 %12, i32* %old
call i32 @llvm.atomic.load.umin.i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
store i32 %13, i32* %old
call i32 @llvm.atomic.load.umin.i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
store i32 %14, i32* %old
call i32 @llvm.atomic.swap.i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
store i32 %15, i32* %old
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
store i32 %16, i32* %old
call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
store i32 %17, i32* %old
ret void
}
declare i32 @llvm.atomic.load.add.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.sub.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.and.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.or.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.xor.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.min.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.max.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.umax.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.umin.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.swap.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
declare i32 @llvm.atomic.cmp.swap.i32(i32*, i32, i32) nounwind
declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind


@ -443,8 +443,8 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
return true;
}
if (getExtTypeNum(0) == MVT::iPTR) {
if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == EMVT::isInt)
if (getExtTypeNum(0) == MVT::iPTR || getExtTypeNum(0) == MVT::iPTRAny) {
if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny || ExtVTs[0] == EMVT::isInt)
return false;
if (EMVT::isExtIntegerInVTs(ExtVTs)) {
std::vector<unsigned char> FVTs = FilterEVTs(ExtVTs, isInteger);
@ -463,7 +463,8 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
setTypes(FVTs);
return true;
}
if (ExtVTs[0] == MVT::iPTR && EMVT::isExtIntegerInVTs(getExtTypes())) {
if ((ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny) &&
EMVT::isExtIntegerInVTs(getExtTypes())) {
//assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
if (getExtTypes() == FVTs)
@ -495,7 +496,8 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
setTypes(ExtVTs);
return true;
}
if (getExtTypeNum(0) == EMVT::isInt && ExtVTs[0] == MVT::iPTR) {
if (getExtTypeNum(0) == EMVT::isInt &&
(ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny)) {
setTypes(ExtVTs);
return true;
}
@ -527,6 +529,7 @@ void TreePatternNode::print(std::ostream &OS) const {
case EMVT::isFP : OS << ":isFP"; break;
case EMVT::isUnknown: ; /*OS << ":?";*/ break;
case MVT::iPTR: OS << ":iPTR"; break;
case MVT::iPTRAny: OS << ":iPTRAny"; break;
default: {
std::string VTName = llvm::getName(getTypeNum(0));
// Strip off MVT:: prefix if present.
@ -781,7 +784,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
assert(getTypeNum(i) == VT && "TreePattern has too many types!");
VT = getTypeNum(0);
if (VT != MVT::iPTR) {
if (VT != MVT::iPTR && VT != MVT::iPTRAny) {
unsigned Size = MVT(VT).getSizeInBits();
// Make sure that the value is representable for this type.
if (Size < 32) {


@ -182,13 +182,14 @@ public:
bool isLeaf() const { return Val != 0; }
bool hasTypeSet() const {
return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR);
return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR) ||
(Types[0] == MVT::iPTRAny);
}
bool isTypeCompletelyUnknown() const {
return Types[0] == EMVT::isUnknown;
}
bool isTypeDynamicallyResolved() const {
return Types[0] == MVT::iPTR;
return (Types[0] == MVT::iPTR) || (Types[0] == MVT::iPTRAny);
}
MVT::SimpleValueType getTypeNum(unsigned Num) const {
assert(hasTypeSet() && "Doesn't have a type yet!");


@ -65,6 +65,7 @@ std::string llvm::getName(MVT::SimpleValueType T) {
case MVT::v3i32: return "MVT::v3i32";
case MVT::v3f32: return "MVT::v3f32";
case MVT::iPTR: return "TLI.getPointerTy()";
case MVT::iPTRAny: return "TLI.getPointerTy()";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@ -101,6 +102,7 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v3i32: return "MVT::v3i32";
case MVT::v3f32: return "MVT::v3f32";
case MVT::iPTR: return "MVT::iPTR";
case MVT::iPTRAny: return "MVT::iPTRAny";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@ -459,7 +461,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
MVT::SimpleValueType VT = getValueType(TyEl->getValueAsDef("VT"));
isOverloaded |= VT == MVT::iAny || VT == MVT::fAny;
isOverloaded |= VT == MVT::iAny || VT == MVT::fAny || VT == MVT::iPTRAny;
ArgVTs.push_back(VT);
ArgTypeDefs.push_back(TyEl);
}


@ -56,7 +56,8 @@ static unsigned getPatternSize(TreePatternNode *P, CodeGenDAGPatterns &CGP) {
EMVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
P->getExtTypeNum(0) == MVT::isVoid ||
P->getExtTypeNum(0) == MVT::Flag ||
P->getExtTypeNum(0) == MVT::iPTR) &&
P->getExtTypeNum(0) == MVT::iPTR ||
P->getExtTypeNum(0) == MVT::iPTRAny) &&
"Not a valid pattern node to size!");
unsigned Size = 3; // The node itself.
// If the root node is a ConstantSDNode, increases its size.
@ -1828,6 +1829,8 @@ void DAGISelEmitter::EmitInstructionSelector(std::ostream &OS) {
std::string OpVTStr;
if (OpVT == MVT::iPTR) {
OpVTStr = "_iPTR";
} else if (OpVT == MVT::iPTRAny) {
OpVTStr = "_iPTRAny";
} else if (OpVT == MVT::isVoid) {
// Nodes with a void result actually have a first result type of either
// Other (a chain) or Flag. Since there is no one-to-one mapping from


@ -162,6 +162,14 @@ static void EmitTypeGenerate(std::ostream &OS, Record *ArgType,
OS << "PointerType::getUnqual(";
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
} else if (VT == MVT::iPTRAny) {
// Make sure the user has passed us an argument type to overload. If not,
// treat it as an ordinary (not overloaded) intrinsic.
OS << "(" << ArgNo << " < numTys) ? Tys[" << ArgNo
<< "] : PointerType::getUnqual(";
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
++ArgNo;
} else if (VT == MVT::isVoid) {
if (ArgNo == 0)
OS << "Type::VoidTy";