//===- README_ALTIVEC.txt - Notes for improving Altivec code gen ----------===//
Implement PPCInstrInfo::isLoadFromStackSlot/isStoreToStackSlot for vector
registers, to generate better spill code.
//===----------------------------------------------------------------------===//
These two functions should be compiled with Altivec support.  The first should
be a single lvx from the constant pool, the
second should be a xor/stvx:
void foo(void) {
  int x[8] __attribute__((aligned(128))) = { 1, 1, 1, 17, 1, 1, 1, 1 };
  bar (x);
}
#include <string.h>
void foo(void) {
  int x[8] __attribute__((aligned(128)));
  memset (x, 0, sizeof (x));
  bar (x);
}
//===----------------------------------------------------------------------===//
Altivec: Codegen'ing MUL with vector FMADD should add -0.0, not 0.0:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=8763
When -ffast-math is on, we can use 0.0.
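
As a sketch of the case in question (a hypothetical test, written with the GNU
vector_size extension rather than <altivec.h>): a plain vector multiply has no
addend, so the vmaddfp form needs an addend that is an additive identity.
-0.0 satisfies x + (-0.0) == x for every x, while with +0.0 a -0.0 product
would come out as +0.0.

typedef float v4f32 __attribute__((vector_size(16)));

v4f32 vmul(v4f32 a, v4f32 b) {
  return a * b;   /* expected: vmaddfp a, b, <-0.0 splat>, not a +0.0 splat */
}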
//===----------------------------------------------------------------------===//
Consider this:
  v4f32 Vector;
  v4f32 Vector2 = { Vector.X, Vector.X, Vector.X, Vector.X };
Since we know that "Vector" is 16-byte aligned and we know the element offset
of ".X", we should change the load into a lve*x instruction, instead of doing
a load/store/lve*x sequence.
//===----------------------------------------------------------------------===//
There are a wide range of vector constants we can generate with combinations of
altivec instructions.  Examples:
GCC does: "t=vsplti*, r = t+t" for constants it can't generate with one vsplti
This should be added to the ISD::BUILD_VECTOR case in
PPCTargetLowering::LowerOperation.
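
For instance (a hypothetical case, applying the GCC trick mentioned above), a
splat of 30 is outside vspltisw's 5-bit signed immediate range (-16..15) but is
15 + 15, so it can be built with two instructions instead of a constant pool
load:

typedef int v4i32 __attribute__((vector_size(16)));

v4i32 splat30(void) {
  return (v4i32){ 30, 30, 30, 30 };   /* vspltisw v0, 15 ; vadduwm v0, v0, v0 */
}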
//===----------------------------------------------------------------------===//
FABS/FNEG can be codegen'd with the appropriate and/xor of -0.0.
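
A rough C illustration of the idea (hypothetical, using the vector_size
extension): the sign bit is the only bit set in -0.0, so FNEG is an xor with a
-0.0 splat and FABS clears the sign bits.

typedef float v4f32 __attribute__((vector_size(16)));
typedef unsigned int v4u32 __attribute__((vector_size(16)));

/* splat of -0.0: only the sign bits are set */
static const v4u32 SignBits = { 0x80000000u, 0x80000000u, 0x80000000u, 0x80000000u };

v4f32 vfneg(v4f32 x) { return (v4f32)((v4u32)x ^ SignBits); }   /* should become a vxor  */
v4f32 vfabs(v4f32 x) { return (v4f32)((v4u32)x & ~SignBits); }  /* should become a vandc */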
//===----------------------------------------------------------------------===//
Codegen the constant here with something better than a constant pool load.
void %test_f(<4 x float>* %P, <4 x float>* %Q, float %X) {
        %tmp = load <4 x float>* %Q
        %tmp = cast <4 x float> %tmp to <4 x int>
        %tmp1 = and <4 x int> %tmp, < int 2147483647, int 2147483647, int 2147483647, int 2147483647 >
        %tmp2 = cast <4 x int> %tmp1 to <4 x float>
        store <4 x float> %tmp2, <4 x float>* %P
        ret void
}
//===----------------------------------------------------------------------===//
For functions that use altivec AND have calls, we are VRSAVE'ing all call
clobbered regs.
//===----------------------------------------------------------------------===//
Implement passing/returning vectors by value.
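
A trivial ABI test (hypothetical) for this item; the arguments and the result
should travel in vector registers rather than through memory:

#include <altivec.h>

vector float vadd_by_value(vector float a, vector float b) {
  return vec_add(a, b);
}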
//===----------------------------------------------------------------------===//
GCC apparently tries to codegen { C1, C2, Variable, C3 } as a constant pool load
of C1/C2/C3, then a load and vperm of Variable.
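
For reference, a hypothetical C case that produces such a { C1, C2, Variable,
C3 } build_vector:

typedef int v4i32 __attribute__((vector_size(16)));

v4i32 build(int x) {
  return (v4i32){ 1, 2, x, 3 };   /* three constants plus one variable element */
}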
//===----------------------------------------------------------------------===//
We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte
aligned stack slot, followed by a lve*x/vperm. We should probably just store it
to a scalar stack slot, then use lvsl/vperm to load it. If the value is already
in memory, this is a huge win.
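
A hypothetical example of the "already in memory" case: splatting a float
loaded through a pointer should be doable with a load of the element plus a
splat/permute, with no store to a temporary stack slot.

typedef float v4f32 __attribute__((vector_size(16)));

v4f32 splat_from_mem(float *P) {
  float x = *P;
  return (v4f32){ x, x, x, x };
}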
//===----------------------------------------------------------------------===//
Do not generate the MFCR/RLWINM sequence for predicate compares when the
predicate compare is used immediately by a branch. Just branch on the right
cond code on CR6.
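
A hypothetical example of a predicate compare feeding a branch; the vec_all_eq
should become a vcmpequw. followed by a branch on CR6, with no mfcr/rlwinm in
between:

#include <altivec.h>

extern void hit(void);

void branch_on_eq(vector int a, vector int b) {
  if (vec_all_eq(a, b))
    hit();
}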
//===----------------------------------------------------------------------===//
SROA should turn "vector unions" into the appropriate insert/extract element
instructions.
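
A hypothetical "vector union" of the kind this refers to; after SROA the
element access should be an extractelement rather than a store/reload through
the stack:

#include <altivec.h>

union f4 {
  vector float v;
  float elt[4];
};

float first_elt(vector float x) {
  union f4 u;
  u.v = x;
  return u.elt[0];
}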
//===----------------------------------------------------------------------===//
We need a way to teach tblgen that some operands of an intrinsic are required to
be constants. The verifier should enforce this constraint.
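
A concrete (hypothetical) instance: the element index of vec_splat has to be a
literal constant, which the corresponding intrinsic definition should be able
to state and the verifier should enforce.

#include <altivec.h>

vector float splat_second(vector float x) {
  return vec_splat(x, 1);   /* the index operand must be a compile-time constant */
}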
//===----------------------------------------------------------------------===//
Instead of writing a pattern for type-agnostic operations (e.g. gen-zero, load,
store, and, ...) in every supported type, make legalize do the work. We should
have a canonical type that we want operations changed to (e.g. v4i32 for
build_vector) and legalize should change non-identical types to these.  This is
similar to what it does for operations that are only supported in some types,
e.g. x86 cmov (not supported on bytes).
This would fix two problems:
1. Writing patterns multiple times.
2. Identical operations in different types are not getting CSE'd.
We already do this for shuffle and build_vector. We need load,undef,and,or,xor,
etc.
//===----------------------------------------------------------------------===//
Implement multiply for vector integer types, to avoid the horrible scalarized
code produced by legalize.
void test(vector int *X, vector int *Y) {
  *X = *X * *Y;
}
//===----------------------------------------------------------------------===//
There are a wide variety of vector_shuffle operations that we can do with a pair
of instructions (e.g. a vsldoi + vpkuhum). We should pattern match these, but
there are a huge number of these.
Specific examples:
  C = vector_shuffle A, B, <0, 1, 2, 4>
    ->  t = vsldoi A, A, 12
    ->  C = vsldoi t, B, 4
//===----------------------------------------------------------------------===//
extract_vector_elt at an arbitrary constant element index can be done with the
following instructions:
  vTemp = vec_splat(v0,2);    // 2 is the element the src is in.
  vec_ste(&destloc,0,vTemp);
We can handle an arbitrary non-constant element index by using lvsr/perm/ste.
//===----------------------------------------------------------------------===//