Implement the newly added ACLE functions for ld1/st1 with 2/3/4 vectors.
The functions are named like vst1_s8_x2 ...

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194990 91177308-0d34-0410-b5e6-96231b3b80d8
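For orientation, here is a usage sketch (not part of the diff) of what a vld1_s8_x2/vst1_s8_x2 pair is expected to look like once lowered to the llvm.aarch64.neon.vld1x2/vst1x2 intrinsics defined below. It assumes the v8i8 overload behaves like the v16i8 and v8i16 overloads exercised by the tests at the end of this commit; the function name is illustrative, and the trailing i32 operand is taken to be the alignment, following the existing arm.neon intrinsic convention.

; Sketch: load two d-register vectors with one ld1, store them with one st1.
define void @copy_two_d_vectors(i8* %src, i8* %dst) {
  %pair = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %src, i32 1)
  %lo = extractvalue { <8 x i8>, <8 x i8> } %pair, 0
  %hi = extractvalue { <8 x i8>, <8 x i8> } %pair, 1
  call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %dst, <8 x i8> %lo, <8 x i8> %hi, i32 1)
  ret void
}

declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)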
include/llvm/IR/IntrinsicsAArch64.td:

@@ -163,6 +163,33 @@ def int_aarch64_neon_vtbx4 :
             LLVMMatchType<1>,  LLVMMatchType<1>, LLVMMatchType<0>],
             [IntrNoMem]>;
 
+// Vector Load/store
+def int_aarch64_neon_vld1x2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+def int_aarch64_neon_vld1x3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                         LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+def int_aarch64_neon_vld1x4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                         LLVMMatchType<0>, LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+
+def int_aarch64_neon_vst1x2 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, llvm_i32_ty],
+                                        [IntrReadWriteArgMem]>;
+def int_aarch64_neon_vst1x3 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, LLVMMatchType<0>,
+                                         llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_aarch64_neon_vst1x4 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, LLVMMatchType<0>,
+                                         LLVMMatchType<0>, llvm_i32_ty],
+                                        [IntrReadWriteArgMem]>;
+
 // Scalar Add
 def int_aarch64_neon_vaddds :
   Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
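A note on reading these definitions: a result list such as [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>] means the intrinsic returns three vectors of one overloaded type, and the trailing llvm_i32_ty operand carries the alignment. As a sketch, the v2i64 overload used by the tests later in this commit should come out as the declarations below; the vst1x3 overload suffix is assumed to follow the same convention as the loads.

declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
declare void @llvm.aarch64.neon.vst1x3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)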
				
lib/Target/AArch64/AArch64ISelDAGToDAG.cpp:

@@ -483,7 +483,6 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
   case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
   case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
-  case AArch64::LD1WB2V_1D_fixed: return AArch64::LD1WB2V_1D_register;
   case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
   case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
   case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
@@ -492,7 +491,6 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
   case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
   case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
-  case AArch64::LD1WB3V_1D_fixed: return AArch64::LD1WB3V_1D_register;
   case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
   case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
   case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
@@ -501,12 +499,38 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
   case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
   case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
-  case AArch64::LD1WB4V_1D_fixed: return AArch64::LD1WB4V_1D_register;
   case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
   case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
   case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
   case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
+
+  case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
+  case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
+  case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
+  case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
+  case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
+  case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
+  case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
+  case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;
+
+  case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
+  case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
+  case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
+  case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
+  case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
+  case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
+  case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
+  case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;
+
+  case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
+  case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
+  case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
+  case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
+  case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
+  case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
+  case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
+  case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;
 
   case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
   case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
   case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
@@ -519,7 +543,6 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
   case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
   case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
-  case AArch64::ST1WB2V_1D_fixed: return AArch64::ST1WB2V_1D_register;
   case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
   case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
   case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
@@ -528,7 +551,6 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
   case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
   case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
-  case AArch64::ST1WB3V_1D_fixed: return AArch64::ST1WB3V_1D_register;
   case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
   case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
   case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
@@ -537,11 +559,37 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
   case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
   case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
   case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
-  case AArch64::ST1WB4V_1D_fixed: return AArch64::ST1WB4V_1D_register;
   case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
   case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
   case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
   case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
+
+  case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
+  case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
+  case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
+  case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
+  case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
+  case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
+  case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
+  case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;
+
+  case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
+  case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
+  case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
+  case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
+  case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
+  case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
+  case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
+  case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;
+
+  case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
+  case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
+  case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
+  case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
+  case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
+  case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
+  case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
+  case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;
   }
   return Opc; // If not one we handle, return it unchanged.
 }
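One detail worth calling out in the opcode tables below: the 1D slot of each LD2/LD3/LD4 (and ST2/ST3/ST4) table points at an LD1xN/ST1xN opcode, because A64 has no ld2/ld3/ld4 with a .1d arrangement; a multi-register ld1/st1 covers the 64-bit case. A reduced sketch of IR that exercises that slot (the function name is illustrative):

; There is no "ld2 {v0.1d, v1.1d}" encoding, so selecting vld2 on <1 x i64>
; uses the (renamed) LD1x2_1D pattern, i.e. "ld1 {v0.1d, v1.1d}, [x0]".
define { <1 x i64>, <1 x i64> } @vld2_v1i64(i8* %p) {
  %pair = call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %p, i32 8)
  ret { <1 x i64>, <1 x i64> } %pair
}

declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)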
@@ -912,7 +960,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_LD2_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::LD2WB_8B_fixed,  AArch64::LD2WB_4H_fixed,
-      AArch64::LD2WB_2S_fixed,  AArch64::LD1WB2V_1D_fixed,
+      AArch64::LD2WB_2S_fixed,  AArch64::LD1x2WB_1D_fixed,
       AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
       AArch64::LD2WB_4S_fixed,  AArch64::LD2WB_2D_fixed
     };
@@ -921,7 +969,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_LD3_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::LD3WB_8B_fixed,  AArch64::LD3WB_4H_fixed,
-      AArch64::LD3WB_2S_fixed,  AArch64::LD1WB3V_1D_fixed,
+      AArch64::LD3WB_2S_fixed,  AArch64::LD1x3WB_1D_fixed,
       AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
       AArch64::LD3WB_4S_fixed,  AArch64::LD3WB_2D_fixed
     };
@@ -930,12 +978,39 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_LD4_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::LD4WB_8B_fixed,  AArch64::LD4WB_4H_fixed,
-      AArch64::LD4WB_2S_fixed,  AArch64::LD1WB4V_1D_fixed,
+      AArch64::LD4WB_2S_fixed,  AArch64::LD1x4WB_1D_fixed,
       AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
       AArch64::LD4WB_4S_fixed,  AArch64::LD4WB_2D_fixed
     };
     return SelectVLD(Node, 4, true, Opcodes);
   }
+  case AArch64ISD::NEON_LD1x2_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD1x2WB_8B_fixed,  AArch64::LD1x2WB_4H_fixed,
+      AArch64::LD1x2WB_2S_fixed,  AArch64::LD1x2WB_1D_fixed,
+      AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
+      AArch64::LD1x2WB_4S_fixed,  AArch64::LD1x2WB_2D_fixed
+    };
+    return SelectVLD(Node, 2, true, Opcodes);
+  }
+  case AArch64ISD::NEON_LD1x3_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD1x3WB_8B_fixed,  AArch64::LD1x3WB_4H_fixed,
+      AArch64::LD1x3WB_2S_fixed,  AArch64::LD1x3WB_1D_fixed,
+      AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
+      AArch64::LD1x3WB_4S_fixed,  AArch64::LD1x3WB_2D_fixed
+    };
+    return SelectVLD(Node, 3, true, Opcodes);
+  }
+  case AArch64ISD::NEON_LD1x4_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::LD1x4WB_8B_fixed,  AArch64::LD1x4WB_4H_fixed,
+      AArch64::LD1x4WB_2S_fixed,  AArch64::LD1x4WB_1D_fixed,
+      AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
+      AArch64::LD1x4WB_4S_fixed,  AArch64::LD1x4WB_2D_fixed
+    };
+    return SelectVLD(Node, 4, true, Opcodes);
+  }
   case AArch64ISD::NEON_ST1_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::ST1WB_8B_fixed,  AArch64::ST1WB_4H_fixed,
@@ -948,7 +1023,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_ST2_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::ST2WB_8B_fixed,  AArch64::ST2WB_4H_fixed,
-      AArch64::ST2WB_2S_fixed,  AArch64::ST1WB2V_1D_fixed,
+      AArch64::ST2WB_2S_fixed,  AArch64::ST1x2WB_1D_fixed,
       AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
       AArch64::ST2WB_4S_fixed,  AArch64::ST2WB_2D_fixed
     };
@@ -957,7 +1032,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_ST3_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::ST3WB_8B_fixed,  AArch64::ST3WB_4H_fixed,
-      AArch64::ST3WB_2S_fixed,  AArch64::ST1WB3V_1D_fixed,
+      AArch64::ST3WB_2S_fixed,  AArch64::ST1x3WB_1D_fixed,
       AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
       AArch64::ST3WB_4S_fixed,  AArch64::ST3WB_2D_fixed
     };
@@ -966,12 +1041,39 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   case AArch64ISD::NEON_ST4_UPD: {
     static const uint16_t Opcodes[] = {
       AArch64::ST4WB_8B_fixed,  AArch64::ST4WB_4H_fixed,
-      AArch64::ST4WB_2S_fixed,  AArch64::ST1WB4V_1D_fixed,
+      AArch64::ST4WB_2S_fixed,  AArch64::ST1x4WB_1D_fixed,
       AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
       AArch64::ST4WB_4S_fixed,  AArch64::ST4WB_2D_fixed
     };
     return SelectVST(Node, 4, true, Opcodes);
   }
+  case AArch64ISD::NEON_ST1x2_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST1x2WB_8B_fixed,  AArch64::ST1x2WB_4H_fixed,
+      AArch64::ST1x2WB_2S_fixed,  AArch64::ST1x2WB_1D_fixed,
+      AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
+      AArch64::ST1x2WB_4S_fixed,  AArch64::ST1x2WB_2D_fixed
+    };
+    return SelectVST(Node, 2, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST1x3_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST1x3WB_8B_fixed,  AArch64::ST1x3WB_4H_fixed,
+      AArch64::ST1x3WB_2S_fixed,  AArch64::ST1x3WB_1D_fixed,
+      AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
+      AArch64::ST1x3WB_4S_fixed,  AArch64::ST1x3WB_2D_fixed
+    };
+    return SelectVST(Node, 3, true, Opcodes);
+  }
+  case AArch64ISD::NEON_ST1x4_UPD: {
+    static const uint16_t Opcodes[] = {
+      AArch64::ST1x4WB_8B_fixed,  AArch64::ST1x4WB_4H_fixed,
+      AArch64::ST1x4WB_2S_fixed,  AArch64::ST1x4WB_1D_fixed,
+      AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
+      AArch64::ST1x4WB_4S_fixed,  AArch64::ST1x4WB_2D_fixed
+    };
+    return SelectVST(Node, 4, true, Opcodes);
+  }
   case ISD::INTRINSIC_WO_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
     bool IsExt = false;
@@ -1013,25 +1115,49 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
     }
     case Intrinsic::arm_neon_vld2: {
       static const uint16_t Opcodes[] = { AArch64::LD2_8B,  AArch64::LD2_4H,
-                                          AArch64::LD2_2S,  AArch64::LD1_2V_1D,
+                                          AArch64::LD2_2S,  AArch64::LD1x2_1D,
                                           AArch64::LD2_16B, AArch64::LD2_8H,
                                           AArch64::LD2_4S,  AArch64::LD2_2D };
       return SelectVLD(Node, 2, false, Opcodes);
     }
     case Intrinsic::arm_neon_vld3: {
       static const uint16_t Opcodes[] = { AArch64::LD3_8B,  AArch64::LD3_4H,
-                                          AArch64::LD3_2S,  AArch64::LD1_3V_1D,
+                                          AArch64::LD3_2S,  AArch64::LD1x3_1D,
                                           AArch64::LD3_16B, AArch64::LD3_8H,
                                           AArch64::LD3_4S,  AArch64::LD3_2D };
       return SelectVLD(Node, 3, false, Opcodes);
     }
     case Intrinsic::arm_neon_vld4: {
       static const uint16_t Opcodes[] = { AArch64::LD4_8B,  AArch64::LD4_4H,
-                                          AArch64::LD4_2S,  AArch64::LD1_4V_1D,
+                                          AArch64::LD4_2S,  AArch64::LD1x4_1D,
                                           AArch64::LD4_16B, AArch64::LD4_8H,
                                           AArch64::LD4_4S,  AArch64::LD4_2D };
       return SelectVLD(Node, 4, false, Opcodes);
     }
+    case Intrinsic::aarch64_neon_vld1x2: {
+      static const uint16_t Opcodes[] = {
+        AArch64::LD1x2_8B, AArch64::LD1x2_4H,  AArch64::LD1x2_2S,
+        AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
+        AArch64::LD1x2_4S, AArch64::LD1x2_2D
+      };
+      return SelectVLD(Node, 2, false, Opcodes);
+    }
+    case Intrinsic::aarch64_neon_vld1x3: {
+      static const uint16_t Opcodes[] = {
+        AArch64::LD1x3_8B, AArch64::LD1x3_4H,  AArch64::LD1x3_2S,
+        AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
+        AArch64::LD1x3_4S, AArch64::LD1x3_2D
+      };
+      return SelectVLD(Node, 3, false, Opcodes);
+    }
+    case Intrinsic::aarch64_neon_vld1x4: {
+      static const uint16_t Opcodes[] = {
+        AArch64::LD1x4_8B, AArch64::LD1x4_4H,  AArch64::LD1x4_2S,
+        AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
+        AArch64::LD1x4_4S, AArch64::LD1x4_2D
+      };
+      return SelectVLD(Node, 4, false, Opcodes);
+    }
     case Intrinsic::arm_neon_vst1: {
       static const uint16_t Opcodes[] = { AArch64::ST1_8B,  AArch64::ST1_4H,
                                           AArch64::ST1_2S,  AArch64::ST1_1D,
@@ -1041,25 +1167,49 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
     }
     case Intrinsic::arm_neon_vst2: {
      static const uint16_t Opcodes[] = { AArch64::ST2_8B,  AArch64::ST2_4H,
-                                          AArch64::ST2_2S,  AArch64::ST1_2V_1D,
+                                          AArch64::ST2_2S,  AArch64::ST1x2_1D,
                                           AArch64::ST2_16B, AArch64::ST2_8H,
                                           AArch64::ST2_4S,  AArch64::ST2_2D };
       return SelectVST(Node, 2, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst3: {
       static const uint16_t Opcodes[] = { AArch64::ST3_8B,  AArch64::ST3_4H,
-                                          AArch64::ST3_2S,  AArch64::ST1_3V_1D,
+                                          AArch64::ST3_2S,  AArch64::ST1x3_1D,
                                           AArch64::ST3_16B, AArch64::ST3_8H,
                                           AArch64::ST3_4S,  AArch64::ST3_2D };
       return SelectVST(Node, 3, false, Opcodes);
     }
     case Intrinsic::arm_neon_vst4: {
       static const uint16_t Opcodes[] = { AArch64::ST4_8B,  AArch64::ST4_4H,
-                                          AArch64::ST4_2S,  AArch64::ST1_4V_1D,
+                                          AArch64::ST4_2S,  AArch64::ST1x4_1D,
                                           AArch64::ST4_16B, AArch64::ST4_8H,
                                           AArch64::ST4_4S,  AArch64::ST4_2D };
       return SelectVST(Node, 4, false, Opcodes);
     }
+    case Intrinsic::aarch64_neon_vst1x2: {
+      static const uint16_t Opcodes[] = {
+        AArch64::ST1x2_8B, AArch64::ST1x2_4H,  AArch64::ST1x2_2S,
+        AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
+        AArch64::ST1x2_4S, AArch64::ST1x2_2D
+      };
+      return SelectVST(Node, 2, false, Opcodes);
+    }
+    case Intrinsic::aarch64_neon_vst1x3: {
+      static const uint16_t Opcodes[] = {
+        AArch64::ST1x3_8B, AArch64::ST1x3_4H,  AArch64::ST1x3_2S,
+        AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
+        AArch64::ST1x3_4S, AArch64::ST1x3_2D
+      };
+      return SelectVST(Node, 3, false, Opcodes);
+    }
+    case Intrinsic::aarch64_neon_vst1x4: {
+      static const uint16_t Opcodes[] = {
+        AArch64::ST1x4_8B, AArch64::ST1x4_4H,  AArch64::ST1x4_2S,
+        AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
+        AArch64::ST1x4_4S, AArch64::ST1x4_2D
+      };
+      return SelectVST(Node, 4, false, Opcodes);
+    }
     }
     break;
   }
lib/Target/AArch64/AArch64ISelLowering.cpp:

@@ -937,6 +937,18 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     return "AArch64ISD::NEON_ST3_UPD";
   case AArch64ISD::NEON_ST4_UPD:
     return "AArch64ISD::NEON_ST4_UPD";
+  case AArch64ISD::NEON_LD1x2_UPD:
+    return "AArch64ISD::NEON_LD1x2_UPD";
+  case AArch64ISD::NEON_LD1x3_UPD:
+    return "AArch64ISD::NEON_LD1x3_UPD";
+  case AArch64ISD::NEON_LD1x4_UPD:
+    return "AArch64ISD::NEON_LD1x4_UPD";
+  case AArch64ISD::NEON_ST1x2_UPD:
+    return "AArch64ISD::NEON_ST1x2_UPD";
+  case AArch64ISD::NEON_ST1x3_UPD:
+    return "AArch64ISD::NEON_ST1x3_UPD";
+  case AArch64ISD::NEON_ST1x4_UPD:
+    return "AArch64ISD::NEON_ST1x4_UPD";
   case AArch64ISD::NEON_VEXTRACT:
     return "AArch64ISD::NEON_VEXTRACT";
   default:
@@ -3545,6 +3557,18 @@ static SDValue CombineBaseUpdate(SDNode *N,
       NumVecs = 3; isLoad = false; break;
     case Intrinsic::arm_neon_vst4:     NewOpc = AArch64ISD::NEON_ST4_UPD;
       NumVecs = 4; isLoad = false; break;
+    case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
+      NumVecs = 2; break;
+    case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
+      NumVecs = 3; break;
+    case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
+      NumVecs = 4; break;
+    case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
+      NumVecs = 2; isLoad = false; break;
+    case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
+      NumVecs = 3; isLoad = false; break;
+    case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
+      NumVecs = 4; isLoad = false; break;
     }
 
     // Find the size of memory referenced by the load/store.
@@ -3624,6 +3648,12 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     case Intrinsic::arm_neon_vst2:
     case Intrinsic::arm_neon_vst3:
     case Intrinsic::arm_neon_vst4:
+    case Intrinsic::aarch64_neon_vld1x2:
+    case Intrinsic::aarch64_neon_vld1x3:
+    case Intrinsic::aarch64_neon_vld1x4:
+    case Intrinsic::aarch64_neon_vst1x2:
+    case Intrinsic::aarch64_neon_vst1x3:
+    case Intrinsic::aarch64_neon_vst1x4:
       return CombineBaseUpdate(N, DCI);
     default:
       break;
@@ -4170,7 +4200,10 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::arm_neon_vld1:
   case Intrinsic::arm_neon_vld2:
   case Intrinsic::arm_neon_vld3:
-  case Intrinsic::arm_neon_vld4: {
+  case Intrinsic::arm_neon_vld4:
+  case Intrinsic::aarch64_neon_vld1x2:
+  case Intrinsic::aarch64_neon_vld1x3:
+  case Intrinsic::aarch64_neon_vld1x4: {
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     // Conservatively set memVT to the entire set of vectors loaded.
     uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
@@ -4187,7 +4220,10 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::arm_neon_vst1:
   case Intrinsic::arm_neon_vst2:
   case Intrinsic::arm_neon_vst3:
-  case Intrinsic::arm_neon_vst4: {
+  case Intrinsic::arm_neon_vst4:
+  case Intrinsic::aarch64_neon_vst1x2:
+  case Intrinsic::aarch64_neon_vst1x3:
+  case Intrinsic::aarch64_neon_vst1x4: {
     Info.opc = ISD::INTRINSIC_VOID;
     // Conservatively set memVT to the entire set of vectors stored.
     unsigned NumElts = 0;
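The practical effect of the CombineBaseUpdate and getTgtMemIntrinsic changes above: a vld1xN/vst1xN whose base pointer is then advanced by the size of the access gets folded into a single post-increment (writeback) instruction. A reduced sketch of the shape the combine recognizes, mirroring test_vld1x2_fx_update in the test diff below (the function name is illustrative):

; The getelementptr/store of the advanced pointer is the "base update";
; after the combine, load plus increment become one
; "ld1 {v0.16b, v1.16b}, [x0], #32".
define <16 x i8> @vld1x2_then_advance(i8* %a, i8** %ptr) {
  %vals = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
  %v0 = extractvalue { <16 x i8>, <16 x i8> } %vals, 0
  %a.next = getelementptr i8* %a, i32 32   ; advance by the 32 bytes just loaded
  store i8* %a.next, i8** %ptr
  ret <16 x i8> %v0
}

declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)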
lib/Target/AArch64/AArch64ISelLowering.h:

@@ -157,12 +157,18 @@ namespace AArch64ISD {
     NEON_LD2_UPD,
     NEON_LD3_UPD,
     NEON_LD4_UPD,
+    NEON_LD1x2_UPD,
+    NEON_LD1x3_UPD,
+    NEON_LD1x4_UPD,
 
     // NEON stores with post-increment base updates:
     NEON_ST1_UPD,
     NEON_ST2_UPD,
     NEON_ST3_UPD,
-    NEON_ST4_UPD
+    NEON_ST4_UPD,
+    NEON_ST1x2_UPD,
+    NEON_ST1x3_UPD,
+    NEON_ST1x4_UPD
   };
 }
lib/Target/AArch64/AArch64InstrNEON.td:

@@ -3364,14 +3364,14 @@ defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
 defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
 
 // Load multiple 1-element structure to N consecutive registers (N = 2,3,4)
-defm LD1_2V : LDVList_BHSD<0b1010, "VPair", "ld1">;
-def LD1_2V_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
+defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
+def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
 
-defm LD1_3V : LDVList_BHSD<0b0110, "VTriple", "ld1">;
-def LD1_3V_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
+defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
+def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
 
-defm LD1_4V : LDVList_BHSD<0b0010, "VQuad", "ld1">;
-def LD1_4V_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
+defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
+def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
 
 class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
                     RegisterOperand VecList, string asmop>
@@ -3418,14 +3418,14 @@ defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
 defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
 
 // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
-defm ST1_2V : STVList_BHSD<0b1010, "VPair", "st1">;
-def ST1_2V_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
+defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
+def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
 
-defm ST1_3V : STVList_BHSD<0b0110, "VTriple", "st1">;
-def ST1_3V_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
+defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
+def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
 
-defm ST1_4V : STVList_BHSD<0b0010, "VQuad", "st1">;
-def ST1_4V_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
+defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
+def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
 
 // End of vector load/store multiple N-element structure(class SIMD lselem)
 
@@ -3553,19 +3553,19 @@ defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">
 
 // Post-index load multiple 1-element structures from N consecutive registers
 // (N = 2,3,4)
-defm LD1WB2V : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
                                "ld1">;
-defm LD1WB2V_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
                                    uimm_exact16, "ld1">;
 
-defm LD1WB3V : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
                                "ld1">;
-defm LD1WB3V_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
                                    uimm_exact24, "ld1">;
 
-defm LD1WB_4V : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
                                 "ld1">;
-defm LD1WB4V_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
                                    uimm_exact32, "ld1">;
 
 multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
@@ -3635,19 +3635,19 @@ defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">
 
 // Post-index load multiple 1-element structures from N consecutive registers
 // (N = 2,3,4)
-defm ST1WB2V : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
+defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
                                "st1">;
-defm ST1WB2V_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
+defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
                                    uimm_exact16, "st1">;
 
-defm ST1WB3V : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
+defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
                                "st1">;
-defm ST1WB3V_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
+defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
                                    uimm_exact24, "st1">;
 
-defm ST1WB4V : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
+defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
                                "st1">;
-defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
+defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
                                    uimm_exact32, "st1">;
 
 // End of post-index vector load/store multiple N-element structure
										
											
[File diff suppressed because it is too large]
											
										
									
								
							@@ -1,5 +1,6 @@
 | 
				
			|||||||
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 | 
					; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					;Check for a post-increment updating load.
 | 
				
			||||||
define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
 | 
					define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
 | 
				
			||||||
; CHECK: test_vld1_fx_update
 | 
					; CHECK: test_vld1_fx_update
 | 
				
			||||||
; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
 | 
					; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
 | 
				
			||||||
@@ -11,6 +12,7 @@ define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
 | 
				
			|||||||
  ret <4 x i16> %tmp1
 | 
					  ret <4 x i16> %tmp1
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					;Check for a post-increment updating load with register increment.
 | 
				
			||||||
define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
 | 
					define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
 | 
				
			||||||
; CHECK: test_vld1_reg_update
 | 
					; CHECK: test_vld1_reg_update
 | 
				
			||||||
; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
 | 
					; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
 | 
				
			||||||
@@ -81,7 +83,6 @@ define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
 | 
				
			|||||||
  ret <8 x i16> %tmp2
 | 
					  ret <8 x i16> %tmp2
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
;Check for a post-increment updating load with register increment.
 | 
					 | 
				
			||||||
define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
 | 
					define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
 | 
				
			||||||
; CHECK: test_vld4_reg_update
 | 
					; CHECK: test_vld4_reg_update
 | 
				
			||||||
; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
 | 
					; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
 | 
				
			||||||
@@ -93,7 +94,6 @@ define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
 | 
				
			|||||||
  ret <8 x i8> %tmp1
 | 
					  ret <8 x i8> %tmp1
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
;Check for a post-increment updating store.
 | 
					 | 
				
			||||||
define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
 | 
					define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
 | 
				
			||||||
; CHECK: test_vst1_fx_update
 | 
					; CHECK: test_vst1_fx_update
 | 
				
			||||||
; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
 | 
					; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
 | 
				
			||||||
@@ -198,3 +198,157 @@ declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32
 | 
				
			|||||||
declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
 | 
					declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
 | 
				
			||||||
declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
 | 
					declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
 | 
				
			||||||
declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
 | 
					declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
 | 
				
			||||||
+
+define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
+; CHECK: test_vld1x2_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+  %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
+  %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
+  %tmp1 = getelementptr i8* %a, i32 32
+  store i8* %tmp1, i8** %ptr
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x2_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = bitcast i16* %a to i8*
+  %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
+  %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret <8 x i16> %3
+}
+
+define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
+; CHECK: test_vld1x3_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
+  %1 = bitcast i64* %a to i8*
+  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
+  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+  %tmp1 = getelementptr i64* %a, i32 6
+  store i64* %tmp1, i64** %ptr
+  ret <2 x i64> %3
+}
+
+define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x3_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = bitcast i16* %a to i8*
+  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
+  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret <8 x i16> %3
+}
+
+define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
+; CHECK: test_vld1x4_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+  %1 = bitcast float* %a to i8*
+  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
+  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+  %tmp1 = getelementptr float* %a, i32 16
+  store float* %tmp1, float** %ptr
+  ret <4 x float> %3
+}
+
+define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
+; CHECK: test_vld1x4_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
+  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+  %tmp1 = getelementptr i8* %a, i32 %inc
+  store i8* %tmp1, i8** %ptr
+  ret <8 x i8> %2
+}
+
+define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
+; CHECK: test_vst1x2_fx_update
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+  %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
+  %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
+  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
+  %tmp1 = getelementptr i8* %a, i32 32
+  store i8* %tmp1, i8** %ptr
+  ret void
+}
+
+define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x2_reg_update
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
+  %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
+  %3 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret void
+}
+
+define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
+; CHECK: test_vst1x3_fx_update
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
+  %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
+  %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
+  %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
+  %4 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
+  %tmp1 = getelementptr i32* %a, i32 6
+  store i32* %tmp1, i32** %ptr
+  ret void
+}
+
+define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x3_reg_update
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
+  %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
+  %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
+  %4 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
+  %tmp1 = getelementptr i64* %a, i32 %inc
+  store i64* %tmp1, i64** %ptr
+  ret void
+}
+
+define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
+; CHECK: test_vst1x4_fx_update
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+  %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
+  %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
+  %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
+  %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
+  %5 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
+  %tmp1 = getelementptr float* %a, i32 16
+  store float* %tmp1, float** %ptr
+  ret void
+}
+
+define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x4_reg_update
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
+  %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
+  %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
+  %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
+  %5 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
+  %tmp1 = getelementptr double* %a, i32 %inc
+  store double* %tmp1, double** %ptr
+  ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
+declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
+declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3
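For reference, the C-level ACLE functions this commit implements (the commit message's vst1_s8_x2 family) sit on top of the llvm.aarch64.neon.vld1x*/vst1x* intrinsics exercised by the tests above. The sketch below is illustrative only and not part of the commit: it assumes an AArch64 compiler whose arm_neon.h already provides the _x2 variants, and whether the backend actually folds the pointer bumps into the post-indexed ld1/st1 writeback forms (the property the tests check at the IR level) depends on the optimization level.

#include <arm_neon.h>
#include <stddef.h>

/* Copy n bytes (n a multiple of 16) two D registers at a time.
 * Each iteration advances src/dst by the fixed 16-byte access size,
 * which is the pattern eligible for the post-increment writeback
 * encodings, e.g. "ld1 {vA.8b, vB.8b}, [xN], #16".                  */
void copy_x2(const int8_t *src, int8_t *dst, size_t n) {
  for (size_t i = 0; i < n; i += 16) {
    int8x8x2_t v = vld1_s8_x2(src); /* one ld1 of two 8B vectors */
    vst1_s8_x2(dst, v);             /* one st1 of two 8B vectors */
    src += 16;
    dst += 16;
  }
}

Passing a loop-variant increment instead of a fixed one corresponds to the *_reg_update tests, which expect the register-offset writeback form (", x{{[0-9]+}}") rather than an immediate.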